// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"

#include <linux/dma-mapping.h>
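
/*
 * VRAM-backed blobs have no guest pages; their backing lives in the
 * device's host-visible region. If this object was mapped into that
 * region, ask the host to unmap it before unreferencing the resource.
 */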
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	bool unmap;

	if (bo->created) {
		spin_lock(&vgdev->host_visible_lock);
		unmap = drm_mm_node_allocated(&vram->vram_node);
		spin_unlock(&vgdev->host_visible_lock);

		if (unmap)
			virtio_gpu_cmd_unmap(vgdev, bo);

		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
	}
}

static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
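
/*
 * mmap() a mappable blob: wait for the host's MAP response, then remap
 * the object's slice of the host-visible region into userspace with the
 * caching mode (WC/uncached) the host reported in map_info.
 */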
static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
				struct vm_area_struct *vma)
{
	int ret;
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	unsigned long vm_size = vma->vm_end - vma->vm_start;

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return -EINVAL;

	wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
	if (vram->map_state != STATE_OK)
		return -EINVAL;

	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	vma->vm_ops = &virtio_gpu_vram_vm_ops;

	if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Partial mappings of GEM buffers don't happen much in practice. */
	if (vm_size != vram->vram_node.size)
		return -EINVAL;

	ret = io_remap_pfn_range(vma, vma->vm_start,
				 vram->vram_node.start >> PAGE_SHIFT,
				 vm_size, vma->vm_page_prot);
	return ret;
}
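
/*
 * dma-buf map callback. Non-mappable blobs can still be shared with
 * another virtio device, which looks the buffer up by UUID; for that
 * case a stub (zero-entry) sg_table is returned so the dma-buf API
 * keeps working. Mappable blobs get a single-entry table created with
 * dma_map_resource(), since VRAM carveouts have no struct page backing.
 */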
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
					     struct device *dev,
					     enum dma_data_direction dir)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	struct sg_table *sgt;
	dma_addr_t addr;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
		// Virtio devices can access the dma-buf via its UUID. Return a stub
		// sg_table so the dma-buf API still works.
		if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
			ret = -EIO;
			goto out;
		}
		return sgt;
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		goto out;

	addr = dma_map_resource(dev, vram->vram_node.start,
				vram->vram_node.size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	ret = dma_mapping_error(dev, addr);
	if (ret)
		goto out;

	sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
	sg_dma_address(sgt->sgl) = addr;
	sg_dma_len(sgt->sgl) = vram->vram_node.size;

	return sgt;

out:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(ret);
}
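
/*
 * Undo virtio_gpu_vram_map_dma_buf(). The stub table returned in the
 * UUID case has nents == 0, so there is nothing to unmap then.
 */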
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
				   struct sg_table *sgt,
				   enum dma_data_direction dir)
{
	if (sgt->nents) {
		dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
				   sg_dma_len(sgt->sgl), dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.free = virtio_gpu_vram_free,
	.mmap = virtio_gpu_vram_mmap,
	.export = virtgpu_gem_prime_export,
};
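
/* True if the GEM object was created by virtio_gpu_vram_create(). */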
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_vram_funcs;
}
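
/*
 * Reserve a node in the host-visible address space and ask the host to
 * map the blob's backing there. On any failure after the node is
 * allocated, it is removed again under host_visible_lock.
 */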
static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
{
	int ret;
	uint64_t offset;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

	if (!vgdev->has_host_visible)
		return -EINVAL;

	spin_lock(&vgdev->host_visible_lock);
	ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
				 bo->base.base.size);
	spin_unlock(&vgdev->host_visible_lock);

	if (ret)
		return ret;

	objs = virtio_gpu_array_alloc(1);
	if (!objs) {
		ret = -ENOMEM;
		goto err_remove_node;
	}

	virtio_gpu_array_add_obj(objs, &bo->base.base);
	/* TODO: Add an error checking helper function in drm_mm.h */
	offset = vram->vram_node.start - vgdev->host_visible_region.addr;

	ret = virtio_gpu_cmd_map(vgdev, objs, offset);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		goto err_remove_node;
	}

	return 0;

err_remove_node:
	spin_lock(&vgdev->host_visible_lock);
	drm_mm_remove_node(&vram->vram_node);
	spin_unlock(&vgdev->host_visible_lock);
	return ret;
}
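
/*
 * Create a VRAM blob object: a GEM object with no shmem backing whose
 * storage lives on the host. Mappable blobs additionally get a carveout
 * of the host-visible region via virtio_gpu_vram_map().
 */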
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_object_params *params,
			   struct virtio_gpu_object **bo_ptr)
{
	struct drm_gem_object *obj;
	struct virtio_gpu_object_vram *vram;
	int ret;

	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	obj = &vram->base.base.base;
	obj->funcs = &virtio_gpu_vram_funcs;

	params->size = PAGE_ALIGN(params->size);
	drm_gem_private_object_init(vgdev->ddev, obj, params->size);

	/* Create fake offset */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		kfree(vram);
		return ret;
	}

	ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
	if (ret) {
		kfree(vram);
		return ret;
	}

	virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
					    0);
	if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
		ret = virtio_gpu_vram_map(&vram->base);
		if (ret) {
			virtio_gpu_vram_free(obj);
			return ret;
		}
	}

	*bo_ptr = &vram->base;
	return 0;
}