Merge tag '9p-for-4.20' of git://github.com/martinetd/linux

diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 020070d483d350a58695c7fa9f21f7c2be4db463..4e2e037aed34a22551225f2cb623f5492484c72a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -423,8 +423,9 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
-void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
-                                          uint32_t resource_id)
+static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
+                                                 uint32_t resource_id,
+                                                 struct virtio_gpu_fence **fence)
 {
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
@@ -435,7 +436,7 @@ void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
 
-       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+       virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
 
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
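
Note: the two hunks above make the detach-backing command file-local and give
it an optional fence out-parameter, routed through the fenced queue so a
caller can wait until the host has consumed the command. A minimal caller
sketch, assuming (as the detach path later in this patch does) that
virtio_gpu_queue_fenced_ctrl_buffer allocates the fence and hands it back
through *fence:

struct virtio_gpu_fence *fence;

virtio_gpu_cmd_resource_inval_backing(vgdev, resource_id, &fence);
dma_fence_wait(&fence->f, true);        /* interruptible wait for the host */
dma_fence_put(&fence->f);               /* drop the reference we were given */
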
@@ -482,19 +483,26 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-                                       uint32_t resource_id, uint64_t offset,
+                                       struct virtio_gpu_object *bo,
+                                       uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
                                        struct virtio_gpu_fence **fence)
 {
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
+       bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+
+       if (use_dma_api)
+               dma_sync_sg_for_device(vgdev->vdev->dev.parent,
+                                      bo->pages->sgl, bo->pages->nents,
+                                      DMA_TO_DEVICE);
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
 
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
-       cmd_p->resource_id = cpu_to_le32(resource_id);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = width;
        cmd_p->r.height = height;
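
Note: on the DMA-API path, CPU writes to the backing pages must be flushed to
the device-visible mapping before the host is told to read them, hence the
dma_sync_sg_for_device() call. The use_dma_api test relies on
virtio_has_iommu_quirk(), which (per include/linux/virtio_config.h) boils
down to the reverse-polarity VIRTIO_F_IOMMU_PLATFORM feature bit; a sketch of
that helper, not part of this diff:

static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
{
        /* Without IOMMU_PLATFORM the device accesses guest memory by
         * physical address, so the DMA API must be bypassed. */
        return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}
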
@@ -648,11 +656,11 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 {
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
-       int max_size = vgdev->capsets[idx].max_size;
+       int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *resp_buf;
 
-       if (idx > vgdev->num_capsets)
+       if (idx >= vgdev->num_capsets)
                return -EINVAL;
 
        if (version > vgdev->capsets[idx].max_version)
@@ -662,6 +670,7 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
        if (!cache_ent)
                return -ENOMEM;
 
+       max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
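
Note: two fixes here. The bounds check was off by one (capsets[] holds
num_capsets entries, so idx == num_capsets is already out of range), and
max_size was initialized from capsets[idx] before that check ran, a potential
out-of-bounds read. Concretely, with num_capsets == 1 the old code did:

int max_size = vgdev->capsets[1].max_size;      /* one past the array */

if (idx > vgdev->num_capsets)                   /* 1 > 1: no error */
        return -EINVAL;

Moving the assignment below the corrected check closes both holes.
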
@@ -780,20 +789,27 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-                                       uint32_t resource_id, uint32_t ctx_id,
+                                       struct virtio_gpu_object *bo,
+                                       uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_fence **fence)
 {
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
+       bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+
+       if (use_dma_api)
+               dma_sync_sg_for_device(vgdev->vdev->dev.parent,
+                                      bo->pages->sgl, bo->pages->nents,
+                                      DMA_TO_DEVICE);
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
 
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-       cmd_p->resource_id = cpu_to_le32(resource_id);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
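
Note: as in the 2D variant, passing the object instead of a bare resource id
lets the function sync the scatterlist for the device and derive the id from
bo->hw_res_handle. Callers change to match; a sketch of the ioctl-side
update (hunk not shown here, names assumed from virtgpu_ioctl.c):

-       virtio_gpu_cmd_transfer_to_host_3d(vgdev, qobj->hw_res_handle,
+       virtio_gpu_cmd_transfer_to_host_3d(vgdev, qobj,
                                           vfpriv->ctx_id, offset, args->level,
                                           &box, &fence);
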
@@ -848,9 +864,10 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             uint32_t resource_id,
                             struct virtio_gpu_fence **fence)
 {
+       bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
-       int si;
+       int si, nents;
 
        if (!obj->pages) {
                int ret;
@@ -860,28 +877,60 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                        return ret;
        }
 
+       if (use_dma_api) {
+               obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+                                        obj->pages->sgl, obj->pages->nents,
+                                        DMA_TO_DEVICE);
+               nents = obj->mapped;
+       } else {
+               nents = obj->pages->nents;
+       }
+
        /* gets freed when the ring has consumed it */
-       ents = kmalloc_array(obj->pages->nents,
-                            sizeof(struct virtio_gpu_mem_entry),
+       ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }
 
-       for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
-               ents[si].addr = cpu_to_le64(sg_phys(sg));
+       for_each_sg(obj->pages->sgl, sg, nents, si) {
+               ents[si].addr = cpu_to_le64(use_dma_api
+                                           ? sg_dma_address(sg)
+                                           : sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }
 
        virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
-                                              ents, obj->pages->nents,
+                                              ents, nents,
                                               fence);
        obj->hw_res_handle = resource_id;
        return 0;
 }
 
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+                             struct virtio_gpu_object *obj)
+{
+       bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+       struct virtio_gpu_fence *fence;
+
+       if (use_dma_api && obj->mapped) {
+               /* detach backing and wait for the host to process it ... */
+               virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+               dma_fence_wait(&fence->f, true);
+               dma_fence_put(&fence->f);
+
+               /* ... then tear down iommu mappings */
+               dma_unmap_sg(vgdev->vdev->dev.parent,
+                            obj->pages->sgl, obj->mapped,
+                            DMA_TO_DEVICE);
+               obj->mapped = 0;
+       } else {
+               virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
+       }
+}
+
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
 {
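
Note: dma_map_sg() may coalesce scatterlist entries, so the device-visible
entry count is its return value (kept in obj->mapped), not obj->pages->nents;
the ents array, the copy loop and the attach-backing command all switch to
that count, and addresses come from sg_dma_address() instead of sg_phys().
On teardown the order matters: the host may still DMA from the pages until
it has processed DETACH_BACKING, so the fence wait has to precede
dma_unmap_sg(). A hypothetical release path (function name invented for
illustration):

static void example_release(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_object *obj)
{
        /* Queues DETACH_BACKING; on the DMA-API path this waits for the
         * host on a fence and only then unmaps the scatterlist. */
        virtio_gpu_object_detach(vgdev, obj);

        /* The host now holds no references, pages can be freed. */
}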