drivers/gpu/drm/udl/udl_dmabuf.c
/*
 * udl_dmabuf.c
 *
 * Copyright (c) 2014 The Chromium OS Authors
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

/*
 * Per-attachment bookkeeping: each importer gets its own copy of the
 * object's scatter/gather table, together with the direction it is
 * currently mapped for (DMA_NONE while unmapped).
 */
struct udl_drm_dmabuf_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
        bool is_mapped;
};

static int udl_attach_dma_buf(struct dma_buf *dmabuf,
                              struct device *dev,
                              struct dma_buf_attachment *attach)
{
        struct udl_drm_dmabuf_attachment *udl_attach;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
                        attach->dmabuf->size);

        udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
        if (!udl_attach)
                return -ENOMEM;

        udl_attach->dir = DMA_NONE;
        attach->priv = udl_attach;

        return 0;
}

static void udl_detach_dma_buf(struct dma_buf *dmabuf,
                               struct dma_buf_attachment *attach)
{
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct sg_table *sgt;

        if (!udl_attach)
                return;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
                        attach->dmabuf->size);

        sgt = &udl_attach->sgt;

        if (udl_attach->dir != DMA_NONE)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                                udl_attach->dir);

        sg_free_table(sgt);
        kfree(udl_attach);
        attach->priv = NULL;
}

static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
                                        enum dma_data_direction dir)
{
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
        struct drm_device *dev = obj->base.dev;
        struct udl_device *udl = dev->dev_private;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt = NULL;
        unsigned int i;
        int page_count;
        int nents, ret;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
                        attach->dmabuf->size, dir);

        /* just return the current sgt if it is already mapped for this direction. */
        if (udl_attach->dir == dir && udl_attach->is_mapped)
                return &udl_attach->sgt;

        if (!obj->pages) {
                ret = udl_gem_get_pages(obj);
                if (ret) {
                        DRM_ERROR("failed to map pages.\n");
                        return ERR_PTR(ret);
                }
        }

        page_count = obj->base.size / PAGE_SIZE;
        obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
        if (IS_ERR(obj->sg)) {
                DRM_ERROR("failed to allocate sgt.\n");
                return ERR_CAST(obj->sg);
        }

        sgt = &udl_attach->sgt;

        ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
        if (ret) {
                DRM_ERROR("failed to alloc sgt.\n");
                return ERR_PTR(-ENOMEM);
        }

        mutex_lock(&udl->gem_lock);

        /* clone the object's scatterlist into the per-attachment table */
        rd = obj->sg->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        if (dir != DMA_NONE) {
                nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
                if (!nents) {
                        DRM_ERROR("failed to map sgl with iommu.\n");
                        sg_free_table(sgt);
                        sgt = ERR_PTR(-EIO);
                        goto err_unlock;
                }
        }

        udl_attach->is_mapped = true;
        udl_attach->dir = dir;
        attach->priv = udl_attach;

err_unlock:
        mutex_unlock(&udl->gem_lock);
        return sgt;
}
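
/*
 * Illustrative sketch (not part of this driver): an importing driver
 * reaches the map/unmap callbacks above through the generic dma-buf
 * attachment API, roughly as follows. "importer_dev" is a hypothetical
 * struct device owned by the importer.
 *
 *      attach = dma_buf_attach(buf, importer_dev);
 *      sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *      ... DMA using sgt ...
 *      dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *      dma_buf_detach(buf, attach);
 */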

static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
                              struct sg_table *sgt,
                              enum dma_data_direction dir)
{
        /* Nothing to do. */
        DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
                        attach->dmabuf->size, dir);
}

static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                                    unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
                              unsigned long page_num, void *addr)
{
        /* TODO */
}

static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
                                     unsigned long page_num,
                                     void *addr)
{
        /* TODO */
}

static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
                           struct vm_area_struct *vma)
{
        /* TODO */

        return -EINVAL;
}

static const struct dma_buf_ops udl_dmabuf_ops = {
        .attach                 = udl_attach_dma_buf,
        .detach                 = udl_detach_dma_buf,
        .map_dma_buf            = udl_map_dma_buf,
        .unmap_dma_buf          = udl_unmap_dma_buf,
        .map                    = udl_dmabuf_kmap,
        .map_atomic             = udl_dmabuf_kmap_atomic,
        .unmap                  = udl_dmabuf_kunmap,
        .unmap_atomic           = udl_dmabuf_kunmap_atomic,
        .mmap                   = udl_dmabuf_mmap,
        .release                = drm_gem_dmabuf_release,
};
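
/*
 * For context, a hedged sketch of how this ops table is reached (the actual
 * wiring lives in udl_drv.c, not here): in this era of the DRM PRIME
 * interface the driver hooks up the export/import entry points roughly like
 * this.
 *
 *      static struct drm_driver driver = {
 *              ...
 *              .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
 *              .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
 *              .gem_prime_export       = udl_gem_prime_export,
 *              .gem_prime_import       = udl_gem_prime_import,
 *      };
 */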

struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj, int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &udl_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
        exp_info.priv = obj;

        return drm_gem_dmabuf_export(dev, &exp_info);
}
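
/*
 * Userspace usage sketch (assumes libdrm; "drm_fd" and "handle" are
 * hypothetical names): exporting a GEM handle as a PRIME fd eventually
 * lands in udl_gem_prime_export() above.
 *
 *      int prime_fd;
 *      int err = drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &prime_fd);
 *      if (err)
 *              ...
 */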

static int udl_prime_create(struct drm_device *dev,
                            size_t size,
                            struct sg_table *sg,
                            struct udl_gem_object **obj_p)
{
        struct udl_gem_object *obj;
        int npages;

        npages = size / PAGE_SIZE;

        *obj_p = NULL;
        obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
        if (!obj)
                return -ENOMEM;

        obj->sg = sg;
        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (obj->pages == NULL) {
                DRM_ERROR("failed to allocate page array (%d pages)\n", npages);
                return -ENOMEM;
        }

        drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

        *obj_p = obj;
        return 0;
}

struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
                                struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct udl_gem_object *uobj;
        int ret;

        /* need to attach */
        get_device(dev->dev);
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach)) {
                put_device(dev->dev);
                return ERR_CAST(attach);
        }

        /* hold a dma-buf reference for as long as the GEM object lives */
        get_dma_buf(dma_buf);

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
        if (ret)
                goto fail_unmap;

        uobj->base.import_attach = attach;
        uobj->flags = UDL_BO_WC;

        return &uobj->base;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);
        put_device(dev->dev);
        return ERR_PTR(ret);
}
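
/*
 * Userspace usage sketch (assumes libdrm; "udl_fd" and "prime_fd" are
 * hypothetical names): the import path above is reached when a PRIME fd
 * exported by another device is turned back into a udl GEM handle.
 *
 *      uint32_t handle;
 *      int err = drmPrimeFDToHandle(udl_fd, prime_fd, &handle);
 *      if (err)
 *              ...
 */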