/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

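/*
 * When enabled (the default), resource IDs come from a monotonically
 * increasing counter and are never recycled; see
 * virtio_gpu_resource_id_get() below for why old virglrenderer
 * releases need this.
 */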
static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

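/*
 * Allocate a host-visible resource ID for a buffer object.  IDs handed
 * to the host are always the allocated handle plus one, keeping ID 0
 * reserved.  Returns 0 on success or a negative errno from the IDA.
 */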
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
        if (virtio_gpu_virglrenderer_workaround) {
                /*
                 * Hack to avoid re-using resource IDs.
                 *
                 * virglrenderer versions up to (and including) 0.7.0
                 * can't deal with that.  virglrenderer commit
                 * "f91a9dd35715 Fix unlinking resources from hash
                 * table." (Feb 2019) fixes the bug.
                 */
                static atomic_t seqno = ATOMIC_INIT(0);
                int handle = atomic_inc_return(&seqno);

                *resid = handle + 1;
        } else {
                int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

                if (handle < 0)
                        return handle;
                *resid = handle + 1;
        }
        return 0;
}

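/*
 * Release a resource ID back to the IDA.  Deliberately a no-op while
 * the virglrenderer workaround is active, since IDs must never be
 * reused in that mode.
 */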
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
        if (!virtio_gpu_virglrenderer_workaround)
                ida_free(&vgdev->resource_ida, id - 1);
}

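/*
 * Final teardown of an object: release its resource ID, then free the
 * backing storage, either through the shmem helpers or, for VRAM
 * objects, by returning the host-visible range and freeing the
 * wrapper by hand.
 */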
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
        if (virtio_gpu_is_shmem(bo)) {
                drm_gem_shmem_free(&bo->base);
        } else if (virtio_gpu_is_vram(bo)) {
                struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

                spin_lock(&vgdev->host_visible_lock);
                if (drm_mm_node_allocated(&vram->vram_node))
                        drm_mm_remove_node(&vram->vram_node);
                spin_unlock(&vgdev->host_visible_lock);

                drm_gem_free_mmap_offset(&vram->base.base.base);
                drm_gem_object_release(&vram->base.base.base);
                kfree(vram);
        }
}

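/*
 * GEM .free callback.  If the host has seen the resource, an unref
 * command has to be sent (and pushed out via virtio_gpu_notify())
 * first; the response handler then finishes the job by calling
 * virtio_gpu_cleanup_object().
 */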
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

        if (bo->created) {
                virtio_gpu_cmd_unref_resource(vgdev, bo);
                virtio_gpu_notify(vgdev);
                /* completion handler calls virtio_gpu_cleanup_object() */
                return;
        }
        virtio_gpu_cleanup_object(bo);
}

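/*
 * Function table for shmem-backed GEM objects.  Most entries are the
 * generic shmem helpers; free, open/close and export are virtio-gpu
 * specific.  The table also serves as the type tag checked by
 * virtio_gpu_is_shmem().
 */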
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
        .free = virtio_gpu_free_object,
        .open = virtio_gpu_gem_object_open,
        .close = virtio_gpu_gem_object_close,
        .print_info = drm_gem_shmem_object_print_info,
        .export = virtgpu_gem_prime_export,
        .pin = drm_gem_shmem_object_pin,
        .unpin = drm_gem_shmem_object_unpin,
        .get_sg_table = drm_gem_shmem_object_get_sg_table,
        .vmap = drm_gem_shmem_object_vmap,
        .vunmap = drm_gem_shmem_object_vunmap,
        .mmap = drm_gem_shmem_object_mmap,
        .vm_ops = &drm_gem_shmem_vm_ops,
};

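/*
 * An object is shmem-backed if and only if it uses the shmem function
 * table above, so the funcs pointer doubles as the type check.
 */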
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
        return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

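/*
 * The driver's gem_create_object hook: allocate the wrapper struct and
 * install our function table.  The shmem helpers that call this hook
 * take care of initializing the embedded GEM object.
 */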
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
                                                size_t size)
{
        struct virtio_gpu_object_shmem *shmem;
        struct drm_gem_shmem_object *dshmem;

        shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
        if (!shmem)
                return ERR_PTR(-ENOMEM);

        dshmem = &shmem->base.base;
        dshmem->base.funcs = &virtio_gpu_shmem_funcs;
        return &dshmem->base;
}

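/*
 * Build the virtio_gpu_mem_entry array describing the pages backing
 * @bo.  When the DMA API is in use the entries carry DMA addresses and
 * the (possibly coalesced) mapped segment count; otherwise they carry
 * physical addresses and the original segment count.
 */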
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_object *bo,
                                        struct virtio_gpu_mem_entry **ents,
                                        unsigned int *nents)
{
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct scatterlist *sg;
        struct sg_table *pages;
        int si;

        pages = drm_gem_shmem_get_pages_sgt(&bo->base);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        if (use_dma_api)
                *nents = pages->nents;
        else
                *nents = pages->orig_nents;

        *ents = kvmalloc_array(*nents,
                               sizeof(struct virtio_gpu_mem_entry),
                               GFP_KERNEL);
        if (!(*ents)) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        if (use_dma_api) {
                for_each_sgtable_dma_sg(pages, sg, si) {
                        (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
                        (*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
                        (*ents)[si].padding = 0;
                }
        } else {
                for_each_sgtable_sg(pages, sg, si) {
                        (*ents)[si].addr = cpu_to_le64(sg_phys(sg));
                        (*ents)[si].length = cpu_to_le32(sg->length);
                        (*ents)[si].padding = 0;
                }
        }

        return 0;
}

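/*
 * Create a shmem-backed object and the matching host resource:
 * allocate the GEM object and a resource ID, describe the backing
 * pages, then issue the appropriate create command (blob, 3D or 2D)
 * and, for non-blob resources, attach the backing pages.  If @fence is
 * given, creation is fenced against the new object.
 */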
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object_params *params,
                             struct virtio_gpu_object **bo_ptr,
                             struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object_array *objs = NULL;
        struct drm_gem_shmem_object *shmem_obj;
        struct virtio_gpu_object *bo;
        struct virtio_gpu_mem_entry *ents = NULL;
        unsigned int nents;
        int ret;

        *bo_ptr = NULL;

        params->size = roundup(params->size, PAGE_SIZE);
        shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
        if (IS_ERR(shmem_obj))
                return PTR_ERR(shmem_obj);
        bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

        ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
        if (ret < 0)
                goto err_free_gem;

        bo->dumb = params->dumb;

        ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
        if (ret != 0)
                goto err_put_id;

        if (fence) {
                ret = -ENOMEM;
                objs = virtio_gpu_array_alloc(1);
                if (!objs)
                        goto err_free_entry;
                virtio_gpu_array_add_obj(objs, &bo->base.base);

                ret = virtio_gpu_array_lock_resv(objs);
                if (ret != 0)
                        goto err_put_objs;
        }

        if (params->blob) {
                if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
                        bo->guest_blob = true;

                virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
                                                    ents, nents);
        } else if (params->virgl) {
                virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
                                                  objs, fence);
                virtio_gpu_object_attach(vgdev, bo, ents, nents);
        } else {
                virtio_gpu_cmd_create_resource(vgdev, bo, params,
                                               objs, fence);
                virtio_gpu_object_attach(vgdev, bo, ents, nents);
        }

        *bo_ptr = bo;
        return 0;

err_put_objs:
        virtio_gpu_array_put_free(objs);
err_free_entry:
        kvfree(ents);
err_put_id:
        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
        drm_gem_shmem_free(shmem_obj);
        return ret;
}