/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>

static const struct dma_buf_ops amdgpu_dmabuf_ops;

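/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object
 *
 * Builds a scatter/gather table from the pages backing @obj for sharing
 * with PRIME importers.
 *
 * Returns:
 * A new sg_table, or an ERR_PTR on failure.
 */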
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        int npages = bo->tbo.num_pages;

        return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

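/**
 * amdgpu_gem_prime_vmap - &drm_driver.gem_prime_vmap implementation
 * @obj: GEM buffer object
 *
 * Maps the buffer object into kernel virtual address space through
 * ttm_bo_kmap() and caches the mapping in the BO.
 *
 * Returns:
 * The kernel virtual address of the mapping, or an ERR_PTR on failure.
 */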
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        int ret;

        ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
                          &bo->dma_buf_vmap);
        if (ret)
                return ERR_PTR(ret);

        return bo->dma_buf_vmap.virtual;
}

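/**
 * amdgpu_gem_prime_vunmap - &drm_driver.gem_prime_vunmap implementation
 * @obj: GEM buffer object
 * @vaddr: virtual address (unused, the mapping is cached in the BO)
 *
 * Tears down the kernel mapping set up by amdgpu_gem_prime_vmap().
 */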
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

        ttm_bo_kunmap(&bo->dma_buf_vmap);
}

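/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM buffer object
 * @vma: virtual memory area
 *
 * Sets up a userspace mapping of the buffer through TTM. Mappings are
 * rejected for buffers backed by user pages or created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS, and for VMAs larger than the buffer.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */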
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        unsigned asize = amdgpu_bo_size(bo);
        int ret;

        if (!vma->vm_file)
                return -ENODEV;

        if (adev == NULL)
                return -ENODEV;

        /* Check for valid size. */
        if (asize < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
            (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                return -EPERM;
        }
        vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

        /* prime mmap does not need to check access, so allow here */
        ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
        if (ret)
                return ret;

        ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
        drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

        return ret;
}

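/**
 * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
 * implementation
 * @dev: DRM device
 * @attach: DMA-buf attachment
 * @sg: scatter/gather table of the imported pages
 *
 * Creates an SG BO that shares the exporter's reservation object and is
 * backed by @sg. The BO is restricted to the GTT domain, since the pages
 * belong to the exporter.
 *
 * Returns:
 * A new GEM buffer object, or an ERR_PTR on failure.
 */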
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
                                 struct dma_buf_attachment *attach,
                                 struct sg_table *sg)
{
        struct reservation_object *resv = attach->dmabuf->resv;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
        int ret;

        memset(&bp, 0, sizeof(bp));
        bp.size = attach->dmabuf->size;
        bp.byte_align = PAGE_SIZE;
        bp.domain = AMDGPU_GEM_DOMAIN_CPU;
        bp.flags = 0;
        bp.type = ttm_bo_type_sg;
        bp.resv = resv;
        ww_mutex_lock(&resv->lock, NULL);
        ret = amdgpu_bo_create(adev, &bp, &bo);
        if (ret)
                goto error;

        bo->tbo.sg = sg;
        bo->tbo.ttm->sg = sg;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
        if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
                bo->prime_shared_count = 1;

        ww_mutex_unlock(&resv->lock);
        return &bo->gem_base;

error:
        ww_mutex_unlock(&resv->lock);
        return ERR_PTR(ret);
}

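/**
 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
 * @dma_buf: shared DMA buffer
 * @target_dev: target device
 * @attach: DMA-buf attachment
 *
 * Pins the buffer into GTT so the importing device can DMA to it. If the
 * importer is driven by another driver, all shared fences are awaited first
 * and the BO's prime_shared_count is bumped.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */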
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
                                 struct device *target_dev,
                                 struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        long r;

        r = drm_gem_map_attach(dma_buf, target_dev, attach);
        if (r)
                return r;

        r = amdgpu_bo_reserve(bo, false);
        if (unlikely(r != 0))
                goto error_detach;

        if (attach->dev->driver != adev->dev->driver) {
                /*
                 * Wait for all shared fences to complete before we switch to future
                 * use of exclusive fence on this prime shared bo.
                 */
                r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
                                                        true, false,
                                                        MAX_SCHEDULE_TIMEOUT);
                if (unlikely(r < 0)) {
                        DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
                        goto error_unreserve;
                }
        }

        /* pin buffer into GTT */
        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
        if (r)
                goto error_unreserve;

        if (attach->dev->driver != adev->dev->driver)
                bo->prime_shared_count++;

error_unreserve:
        amdgpu_bo_unreserve(bo);

error_detach:
        if (r)
                drm_gem_map_detach(dma_buf, attach);
        return r;
}

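/**
 * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
 * @dma_buf: shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * Undoes amdgpu_gem_map_attach(): unpins the buffer and, for importers
 * driven by another driver, drops the BO's prime_shared_count.
 */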
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
                                  struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int ret = 0;

        ret = amdgpu_bo_reserve(bo, true);
        if (unlikely(ret != 0))
                goto error;

        amdgpu_bo_unpin(bo);
        if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
                bo->prime_shared_count--;
        amdgpu_bo_unreserve(bo);

error:
        drm_gem_map_detach(dma_buf, attach);
}

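/**
 * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * The buffer object's reservation object, so that exporter and importers
 * synchronize against the same fences.
 */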
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

        return bo->tbo.resv;
}

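/**
 * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: shared DMA buffer
 * @direction: direction of the pending CPU access
 *
 * Called before CPU access to the buffer's memory. If the access includes a
 * read and the display engine supports GTT, the unpinned buffer is moved to
 * the GTT domain for more efficient CPU access.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */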
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
                                       enum dma_data_direction direction)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { true, false };
        u32 domain = amdgpu_display_supported_domains(adev);
        int ret;
        bool reads = (direction == DMA_BIDIRECTIONAL ||
                      direction == DMA_FROM_DEVICE);

        if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
                return 0;

        /* move to gtt */
        ret = amdgpu_bo_reserve(bo, false);
        if (unlikely(ret != 0))
                return ret;

        if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        }

        amdgpu_bo_unreserve(bo);
        return ret;
}

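/*
 * DMA-buf operations for buffers exported by amdgpu: the generic DRM GEM
 * helpers, plus amdgpu-specific attach, detach and begin_cpu_access hooks.
 */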
static const struct dma_buf_ops amdgpu_dmabuf_ops = {
        .attach = amdgpu_gem_map_attach,
        .detach = amdgpu_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .begin_cpu_access = amdgpu_gem_begin_cpu_access,
        .map = drm_gem_dmabuf_kmap,
        .map_atomic = drm_gem_dmabuf_kmap_atomic,
        .unmap = drm_gem_dmabuf_kunmap,
        .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

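/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @dev: DRM device
 * @gobj: GEM buffer object
 * @flags: flags for the exported DMA-buf file
 *
 * Exports the buffer as a DMA-buf with the amdgpu-specific dma_buf_ops
 * installed. Buffers backed by user pages and per-VM BOs
 * (AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) cannot be exported.
 *
 * Returns:
 * A new DMA-buf, or an ERR_PTR on failure.
 */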
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
                                        struct drm_gem_object *gobj,
                                        int flags)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct dma_buf *buf;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
            bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                return ERR_PTR(-EPERM);

        buf = drm_gem_prime_export(dev, gobj, flags);
        if (!IS_ERR(buf)) {
                buf->file->f_mapping = dev->anon_inode->i_mapping;
                buf->ops = &amdgpu_dmabuf_ops;
        }

        return buf;
}

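/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: shared DMA buffer
 *
 * Short-circuits the import of a DMA-buf we exported from the same device by
 * taking a reference on the underlying GEM object; everything else goes
 * through the generic DRM PRIME import path.
 *
 * Returns:
 * A GEM buffer object, or an ERR_PTR on failure.
 */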
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
                                               struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj;

        if (dma_buf->ops == &amdgpu_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem increases
                         * the refcount on the gem itself instead of the f_count
                         * of the dmabuf.
                         */
                        drm_gem_object_get(obj);
                        return obj;
                }
        }

        return drm_gem_prime_import(dev, dma_buf);
}