diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 957934926b24529341b3b0a4c13468d7e012afee..e42175e1acf18b758c7ab64077c6f2496f0e4550 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
 #include <linux/dma-fence-array.h>
 #include <linux/pci-p2pdma.h>
 
-/**
- * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
- * @obj: GEM BO
- *
- * Sets up an in-kernel virtual mapping of the BO's memory.
- *
- * Returns:
- * The virtual address of the mapping or an error pointer.
- */
-void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
-{
-       struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-       int ret;
-
-       ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
-                         &bo->dma_buf_vmap);
-       if (ret)
-               return ERR_PTR(ret);
-
-       return bo->dma_buf_vmap.virtual;
-}
-
-/**
- * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
- * @obj: GEM BO
- * @vaddr: Virtual address (unused)
- *
- * Tears down the in-kernel virtual mapping of the BO's memory.
- */
-void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
-       struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
-       ttm_bo_kunmap(&bo->dma_buf_vmap);
-}
-
 /**
  * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
  * @obj: GEM BO
@@ -281,7 +245,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
        struct sg_table *sgt;
        long r;
 
-       if (!bo->pin_count) {
+       if (!bo->tbo.pin_count) {
                /* move buffer into GTT or VRAM */
                struct ttm_operation_ctx ctx = { false, false };
                unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
@@ -390,7 +354,8 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
        if (unlikely(ret != 0))
                return ret;
 
-       if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+       if (!bo->tbo.pin_count &&
+           (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        }
@@ -459,6 +424,7 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
+       struct drm_gem_object *gobj;
        int ret;
 
        memset(&bp, 0, sizeof(bp));
@@ -469,17 +435,20 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
        bp.type = ttm_bo_type_sg;
        bp.resv = resv;
        dma_resv_lock(resv, NULL);
-       ret = amdgpu_bo_create(adev, &bp, &bo);
+       ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
+                       AMDGPU_GEM_DOMAIN_CPU,
+                       0, ttm_bo_type_sg, resv, &gobj);
        if (ret)
                goto error;
 
+       bo = gem_to_amdgpu_bo(gobj);
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
        if (dma_buf->ops != &amdgpu_dmabuf_ops)
                bo->prime_shared_count = 1;
 
        dma_resv_unlock(resv);
-       return &bo->tbo.base;
+       return gobj;
 
 error:
        dma_resv_unlock(resv);