/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>

static const struct dma_buf_ops amdgpu_dmabuf_ops;
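
/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object
 *
 * Returns a scatter/gather table covering the pages that back @obj, which an
 * importing device can use to map the buffer for DMA.
 */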
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
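
/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM buffer object
 *
 * Maps the buffer into the kernel virtual address space through TTM and
 * returns the address, or an ERR_PTR-encoded error on failure.
 */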
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}
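
/**
 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 * @obj: GEM buffer object
 * @vaddr: virtual address (unused because TTM remembers the mapping)
 *
 * Tears down the kernel mapping set up by amdgpu_gem_prime_vmap().
 */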
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}
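
/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM buffer object
 * @vma: virtual memory area
 *
 * Maps the buffer into a userspace process through TTM. Userptr buffers and
 * buffers created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS are rejected.
 */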
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}
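
/**
 * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
 * implementation
 * @dev: DRM device
 * @attach: dma-buf attachment
 * @sg: scatter/gather table of the imported pages
 *
 * Creates a GTT-domain buffer object around the imported pages. The object
 * shares the exporter's reservation object so fencing stays synchronized.
 */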
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	int ret;

	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
	ww_mutex_unlock(&resv->lock);
	if (ret)
		return ERR_PTR(ret);

	bo->prime_shared_count = 1;
	return &bo->gem_base;
}
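
/**
 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
 * @dma_buf: shared DMA buffer
 * @target_dev: device the buffer is attached to
 * @attach: dma-buf attachment
 *
 * Pins the buffer into GTT so it stays resident for the importer. When the
 * importer is a foreign device, all shared fences are awaited first, since
 * from then on only the exclusive fence is used on this shared bo.
 */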
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
				 struct device *target_dev,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	long r;

	r = drm_gem_map_attach(dma_buf, target_dev, attach);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto error_detach;

	if (dma_buf->ops != &amdgpu_dmabuf_ops) {
		/*
		 * Wait for all shared fences to complete before we switch to future
		 * use of exclusive fence on this prime shared bo.
		 */
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							MAX_SCHEDULE_TIMEOUT);
		if (unlikely(r < 0)) {
			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
			goto error_unreserve;
		}
	}

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (r)
		goto error_unreserve;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count++;

error_unreserve:
	amdgpu_bo_unreserve(bo);

error_detach:
	if (r)
		drm_gem_map_detach(dma_buf, attach);
	return r;
}
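
/**
 * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
 * @dma_buf: shared DMA buffer
 * @attach: dma-buf attachment
 *
 * Unpins the buffer and, for foreign importers, drops the
 * prime_shared_count reference taken in amdgpu_gem_map_attach().
 */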
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, true);
	if (unlikely(ret != 0))
		goto error;

	amdgpu_bo_unpin(bo);
	if (dma_buf->ops != &amdgpu_dmabuf_ops && bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);

error:
	drm_gem_map_detach(dma_buf, attach);
}
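
/**
 * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
 * @obj: GEM buffer object
 *
 * Returns the buffer object's reservation object, which the DRM core uses
 * for implicit synchronization.
 */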
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}
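
/**
 * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: shared DMA buffer
 * @direction: direction of the pending CPU access
 *
 * Before a CPU read, an unpinned buffer is migrated to GTT when the display
 * engines can also scan out from GTT; reading from GTT is typically much
 * cheaper for the CPU than reading from VRAM across the bus.
 */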
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_framebuffer_domains(adev);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}
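
/*
 * dma-buf operations for buffers exported by amdgpu. Apart from attach,
 * detach and begin_cpu_access, everything is delegated to the generic
 * drm_gem PRIME helpers.
 */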
static const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_gem_map_attach,
	.detach = amdgpu_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
	.map = drm_gem_dmabuf_kmap,
	.map_atomic = drm_gem_dmabuf_kmap_atomic,
	.unmap = drm_gem_dmabuf_kunmap,
	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
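
/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @dev: DRM device
 * @gobj: GEM buffer object
 * @flags: flags for the exported file descriptor
 *
 * Exports a GEM object as a dma-buf. Userptr buffers and per-VM
 * always-valid buffers must not be shared and are rejected with -EPERM.
 * On success the generic dma_buf_ops are swapped for amdgpu_dmabuf_ops.
 */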
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(dev, gobj, flags);
	if (!IS_ERR(buf)) {
		buf->file->f_mapping = dev->anon_inode->i_mapping;
		buf->ops = &amdgpu_dmabuf_ops;
	}

	return buf;
}
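
/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: shared DMA buffer
 *
 * Recognizes a dma-buf that this device itself exported and short-circuits
 * the import to a plain GEM reference; anything else goes through the
 * generic drm_gem_prime_import() path.
 */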
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM takes
			 * a reference on the GEM object itself instead of on
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, dma_buf);
}