/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <[email protected]>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <[email protected]>, Rob Clark <[email protected]> and
 * Daniel Vetter <[email protected]> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>

static inline int is_dma_buf_file(struct file *);
static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	dmabuf->ops->release(dmabuf);
	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}
static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 *
 * @priv:	[in]	Attach private data of allocator to this buffer
 * @ops:	[in]	Attach allocator-defined dma buf ops to the new buffer.
 * @size:	[in]	Size of the buffer
 * @flags:	[in]	mode flags for the file.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. If any of the
 * mandatory ops are missing, or if allocation fails, returns a negative
 * error wrapped in ERR_PTR().
 */
struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
				size_t size, int flags)
{
	struct dma_buf *dmabuf;
	struct file *file;

	if (WARN_ON(!priv || !ops
			  || !ops->map_dma_buf
			  || !ops->unmap_dma_buf
			  || !ops->release
			  || !ops->kmap_atomic
			  || !ops->kmap
			  || !ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
	if (dmabuf == NULL)
		return ERR_PTR(-ENOMEM);

	dmabuf->priv = priv;
	dmabuf->ops = ops;
	dmabuf->size = size;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
	if (IS_ERR(file)) {
		/* don't leak the dma_buf if the anon file can't be created */
		kfree(dmabuf);
		return ERR_CAST(file);
	}

	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export);
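/*
 * Illustrative sketch (not part of this file's API): an exporter wires its
 * dma_buf_ops into dma_buf_export() roughly like this. The names
 * "my_dev_ops", "my_buffer" and the my_*() callbacks are hypothetical.
 *
 *	static const struct dma_buf_ops my_dev_ops = {
 *		.map_dma_buf	= my_map_dma_buf,
 *		.unmap_dma_buf	= my_unmap_dma_buf,
 *		.release	= my_release,
 *		.kmap_atomic	= my_kmap_atomic,
 *		.kmap		= my_kmap,
 *		.mmap		= my_mmap,
 *	};
 *
 *	dmabuf = dma_buf_export(my_buffer, &my_dev_ops, my_buffer->size,
 *				O_RDWR);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 */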
/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int error, fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		return error;
	fd = error;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; this
 * takes a reference on the underlying file via fget(). Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);
/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
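/*
 * Illustrative sketch (hypothetical, not part of this file): the usual
 * handle lifecycle on the importing side. A driver resolves a dma-buf fd
 * it received from userspace, uses the buffer, and drops its reference:
 *
 *	struct dma_buf *dmabuf;
 *
 *	dmabuf = dma_buf_get(fd);	// takes a file reference
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	// ... attach, map and use the buffer (see below) ...
 *
 *	dma_buf_put(dmabuf);		// drops the reference again
 */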
/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; on failure,
 * returns a negative error code wrapped in ERR_PTR().
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
	if (attach == NULL)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);
	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment,
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns the sg_table containing the scatterlist of the buffer; may return
 * NULL or an ERR_PTR.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table = ERR_PTR(-EINVAL);

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the associated scatterlist. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
						direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
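/*
 * Illustrative sketch (not part of this file): the canonical importer
 * sequence for getting a buffer DMA-mapped for a device, assuming "dev" is
 * the importing struct device and "dmabuf" was obtained via dma_buf_get():
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR_OR_NULL(sgt)) {
 *		dma_buf_detach(dmabuf, attach);
 *		return sgt ? PTR_ERR(sgt) : -ENOMEM;
 *	}
 *
 *	// ... program the device with the addresses in sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 */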
/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @start:	[in]	start of range for cpu access.
 * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	direction of cpu access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @start:	[in]	start of range for cpu access.
 * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	direction of cpu access.
 *
 * This call must always succeed.
 */
void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			    enum dma_data_direction direction)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
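/*
 * Illustrative sketch (hypothetical, not part of this file): CPU access to
 * a range of the buffer is always bracketed by these two calls, here for a
 * read of the first PAGE_SIZE bytes:
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	// ... access the range via dma_buf_kmap()/dma_buf_kmap_atomic() ...
 *
 *	dma_buf_end_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 */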
/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap_atomic)
		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap)
		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
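/*
 * Illustrative sketch (hypothetical, not part of this file): reading one
 * byte of page 0 through the kmap interface, inside a begin/end_cpu_access
 * bracket as shown above:
 *
 *	void *vaddr;
 *	u8 byte;
 *
 *	vaddr = dma_buf_kmap(dmabuf, 0);
 *	byte = *(u8 *)vaddr;
 *	dma_buf_kunmap(dmabuf, 0, vaddr);
 */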
/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/*
	 * readjust the vma; take the new reference before dropping the old
	 * one, in case vma->vm_file is already the dma_buf's file
	 */
	get_file(dmabuf->file);
	if (vma->vm_file)
		fput(vma->vm_file);

	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
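/*
 * Illustrative sketch (hypothetical names, not part of this file): a driver
 * that wants to hand out a dma-buf backed mapping from its own file can
 * forward its fops->mmap to this helper:
 *
 *	static int my_driver_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_object *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */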
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use is for mapping
 * frequently used objects linearly into kernel address space. Please
 * attempt to use kmap/kunmap before thinking about these interfaces.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf))
		return NULL;

	if (dmabuf->ops->vmap)
		return dmabuf->ops->vmap(dmabuf);
	return NULL;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	if (dmabuf->ops->vunmap)
		dmabuf->ops->vunmap(dmabuf, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
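/*
 * Illustrative sketch (hypothetical, not part of this file): a driver that
 * needs a contiguous kernel view of the whole buffer; "shadow" is a
 * hypothetical destination, and the exporter may not implement vmap at all:
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (!vaddr)
 *		return -ENOMEM;	// exporter has no vmap, or it failed
 *
 *	memcpy(shadow, vaddr, dmabuf->size);
 *	dma_buf_vunmap(dmabuf, vaddr);
 */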