/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	phys_addr_t	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
	dev->dma_mem = NULL;	/* don't leave a dangling pointer behind */
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
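/*
 * Example (illustrative sketch only, not part of this file's API): a
 * driver whose device owns a chunk of local SRAM could declare it as a
 * per-device coherent pool at probe time. The bus address, device
 * address and size below are hypothetical.
 */
#if 0
static int example_probe(struct device *dev)
{
	int rc = dma_declare_coherent_memory(dev,
					     0x98000000,	/* bus_addr */
					     0x98000000,	/* device_addr */
					     0x10000,		/* 64 KiB */
					     DMA_MEMORY_MAP);
	if (rc != DMA_MEMORY_MAP)
		return -ENODEV;

	/*
	 * From here on, dma_alloc_coherent() on this device is satisfied
	 * from the pool; dma_release_declared_memory(dev) undoes this in
	 * the matching remove() path.
	 */
	return 0;
}
#endif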

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
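/*
 * Example (hypothetical): if the first page of the pool declared in the
 * earlier sketch holds firmware mailboxes that the allocator must never
 * hand out, the driver could pin it right after declaring the pool.
 * 0x98000000 is the hypothetical device_addr from that sketch.
 */
#if 0
static int example_reserve_mailbox(struct device *dev)
{
	void *mbox = dma_mark_declared_memory_occupied(dev, 0x98000000,
						       PAGE_SIZE);
	return IS_ERR(mbox) ? PTR_ERR(mbox) : 0;
}
#endif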

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		of the allocated area.
 *
 * This function should only be called from the per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	int pageno;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	memset(*ret, 0, size);

	return 1;

err:
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
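/*
 * Example of the intended call site: a simplified, hypothetical sketch
 * of a per-arch dma_alloc_coherent(). Real architectures plug in their
 * own generic allocator where the comment indicates.
 */
#if 0
void *example_dma_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* The per-device pool is consulted before generic memory. */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;	/* may be NULL if DMA_MEMORY_EXCLUSIVE */

	/* ... fall back to the architecture's generic allocator ... */
	return NULL;
}
#endif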

/**
 * dma_release_from_coherent() - try to free the memory allocated from the per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
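/*
 * Matching free path, again as a hypothetical per-arch sketch: try the
 * per-device pool first, then fall through to the generic path.
 */
#if 0
void example_dma_free_coherent(struct device *dev, size_t size,
			       void *vaddr, dma_addr_t dma_handle)
{
	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	/* ... not from the pool; free via the arch's generic path ... */
}
#endif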

/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from the
 * per-device coherent memory pool to userspace
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_coherent
 * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		int count = size >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			/* use unsigned long: a pfn can exceed 32 bits */
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
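/*
 * Example mmap hook: a hypothetical sketch of how a per-arch dma_mmap
 * implementation would consult the per-device pool before falling back
 * to its generic mapping code.
 */
#if 0
int example_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* ... not from the pool; map via the arch's generic path ... */
	return -ENXIO;
}
#endif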