/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2021-2023 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"

#include "drm/drm_prime.h"
#include "drm/drm_gem_ttm_helper.h"

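/*
 * GEM .free callback: drop the TTM reference that backs the GEM object.
 * ttm_bo_put() releases the buffer through TTM's normal destroy path
 * once the last reference is gone.
 */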
static void vmw_gem_object_free(struct drm_gem_object *gobj)
{
        struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);

        if (bo)
                ttm_bo_put(bo);
}

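/*
 * GEM .open/.close callbacks. vmwgfx keeps no per-file state on the GEM
 * object itself, so both are no-ops.
 */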
static int vmw_gem_object_open(struct drm_gem_object *obj,
                               struct drm_file *file_priv)
{
        return 0;
}

static void vmw_gem_object_close(struct drm_gem_object *obj,
                                 struct drm_file *file_priv)
{
}

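/*
 * GEM .pin/.unpin callbacks (used e.g. by the dma-buf export helpers).
 * Both delegate to vmw_bo_pin_reserved(), which adjusts the pin count on
 * the underlying TTM buffer object.
 */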
static int vmw_gem_object_pin(struct drm_gem_object *obj)
{
        struct vmw_bo *vbo = to_vmw_bo(obj);

        vmw_bo_pin_reserved(vbo, true);

        return 0;
}

static void vmw_gem_object_unpin(struct drm_gem_object *obj)
{
        struct vmw_bo *vbo = to_vmw_bo(obj);

        vmw_bo_pin_reserved(vbo, false);
}

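/*
 * GEM .get_sg_table callback for dma-buf export. Reuse the sg_table the
 * vmwgfx TTM backend has already built for this buffer if one exists;
 * otherwise construct a fresh one from the TTM page array.
 */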
static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
{
        struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

        if (vmw_tt->vsgt.sgt)
                return vmw_tt->vsgt.sgt;

        return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
}

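/* VM operations used for all mmap mappings of vmwgfx GEM objects. */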
static const struct vm_operations_struct vmw_vm_ops = {
        .pfn_mkwrite = vmw_bo_vm_mkwrite,
        .page_mkwrite = vmw_bo_vm_mkwrite,
        .fault = vmw_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
};

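/* GEM object function table installed on every vmwgfx buffer object. */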
static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
        .free = vmw_gem_object_free,
        .open = vmw_gem_object_open,
        .close = vmw_gem_object_close,
        .print_info = drm_gem_ttm_print_info,
        .pin = vmw_gem_object_pin,
        .unpin = vmw_gem_object_unpin,
        .get_sg_table = vmw_gem_object_get_sg_table,
        .vmap = drm_gem_ttm_vmap,
        .vunmap = drm_gem_ttm_vunmap,
        .mmap = drm_gem_ttm_mmap,
        .vm_ops = &vmw_vm_ops,
};

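/*
 * vmw_gem_object_create - allocate a vmw_bo and expose it as a GEM object.
 *
 * Creates the buffer via vmw_bo_create() and installs the vmwgfx GEM
 * function table on the embedded drm_gem_object. Returns 0 on success or
 * a negative errno from vmw_bo_create().
 */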
int vmw_gem_object_create(struct vmw_private *vmw,
                          struct vmw_bo_params *params,
                          struct vmw_bo **p_vbo)
{
        int ret = vmw_bo_create(vmw, params, p_vbo);

        if (ret != 0)
                goto out_no_bo;

        (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
out_no_bo:
        return ret;
}

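/*
 * vmw_gem_object_create_with_handle - create a GEM buffer object together
 * with a userspace handle for it.
 *
 * Placement defaults to system memory when the device supports MOBs and
 * to VRAM otherwise. On success *p_vbo carries the allocation reference
 * while the new handle holds its own reference.
 */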
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
                                      struct drm_file *filp,
                                      uint32_t size,
                                      uint32_t *handle,
                                      struct vmw_bo **p_vbo)
{
        int ret;
        struct vmw_bo_params params = {
                .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
                .busy_domain = VMW_BO_DOMAIN_SYS,
                .bo_type = ttm_bo_type_device,
                .size = size,
                .pin = false
        };

        ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
        if (ret != 0)
                goto out_no_bo;

        ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle);
out_no_bo:
        return ret;
}

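/*
 * vmw_prime_import_sg_table - PRIME import callback.
 *
 * Wraps an imported sg_table in a vmw_bo of type ttm_bo_type_sg, sharing
 * the exporter's reservation object. Returns the embedded GEM object, or
 * NULL if the buffer could not be created.
 */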
struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
                                                 struct dma_buf_attachment *attach,
                                                 struct sg_table *table)
{
        int ret;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_gem_object *gem = NULL;
        struct vmw_bo *vbo;
        struct vmw_bo_params params = {
                .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
                .busy_domain = VMW_BO_DOMAIN_SYS,
                .bo_type = ttm_bo_type_sg,
                .size = attach->dmabuf->size,
                .pin = false,
                .resv = attach->dmabuf->resv,
                .sg = table,
        };

        dma_resv_lock(params.resv, NULL);

        ret = vmw_bo_create(dev_priv, &params, &vbo);
        if (ret != 0)
                goto out_no_bo;

        vbo->tbo.base.funcs = &vmw_gem_object_funcs;

        gem = &vbo->tbo.base;
out_no_bo:
        dma_resv_unlock(params.resv);
        return gem;
}

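/*
 * vmw_gem_object_create_ioctl - handler for the buffer allocation ioctl
 * (union drm_vmw_alloc_dmabuf_arg).
 *
 * Allocates a GEM buffer object, creates a handle for it and returns the
 * handle and mmap offset to userspace. The allocation reference is dropped
 * before returning; the handle keeps the object alive.
 */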
int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_bo *vbo;
        uint32_t handle;
        int ret;

        ret = vmw_gem_object_create_with_handle(dev_priv, filp,
                                                req->size, &handle, &vbo);
        if (ret)
                goto out_no_bo;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&vbo->tbo.base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(&vbo->tbo.base);
out_no_bo:
        return ret;
}

#if defined(CONFIG_DEBUG_FS)

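/*
 * Print a one-line summary of a buffer object for the vmwgfx_gem_info
 * debugfs file: handle, size, current placement, TTM type, priority,
 * pin count and reference counts.
 */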
static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m)
{
        const char *placement;
        const char *type;

        switch (bo->tbo.resource->mem_type) {
        case TTM_PL_SYSTEM:
                placement = " CPU";
                break;
        case VMW_PL_GMR:
                placement = " GMR";
                break;
        case VMW_PL_MOB:
                placement = " MOB";
                break;
        case VMW_PL_SYSTEM:
                placement = "VCPU";
                break;
        case TTM_PL_VRAM:
                placement = "VRAM";
                break;
        default:
                placement = "None";
                break;
        }

        switch (bo->tbo.type) {
        case ttm_bo_type_device:
                type = "device";
                break;
        case ttm_bo_type_kernel:
                type = "kernel";
                break;
        case ttm_bo_type_sg:
                type = "sg    ";
                break;
        default:
                type = "none  ";
                break;
        }

        seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
                   id, bo->tbo.base.size, placement, type);
        seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
                   bo->tbo.priority,
                   bo->tbo.pin_count,
                   kref_read(&bo->tbo.base.refcount),
                   kref_read(&bo->tbo.kref));
        seq_puts(m, "\n");
}

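/*
 * debugfs show handler: walk every open DRM file and print each GEM
 * handle it owns. dev->filelist_mutex protects the file list and
 * file->table_lock protects each file's handle idr while it is walked.
 */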
static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
        struct vmw_private *vdev = (struct vmw_private *)m->private;
        struct drm_device *dev = &vdev->drm;
        struct drm_file *file;
        int r;

        r = mutex_lock_interruptible(&dev->filelist_mutex);
        if (r)
                return r;

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct task_struct *task;
                struct drm_gem_object *gobj;
                struct pid *pid;
                int id;

                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                rcu_read_lock();
                pid = rcu_dereference(file->pid);
                task = pid_task(pid, PIDTYPE_TGID);
                seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
                           task ? task->comm : "<unknown>");
                rcu_read_unlock();

                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, id) {
                        struct vmw_bo *bo = to_vmw_bo(gobj);

                        vmw_bo_print_info(id, bo, m);
                }
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&dev->filelist_mutex);
        return 0;
}

DEFINE_SHOW_ATTRIBUTE(vmw_debugfs_gem_info);

#endif

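/*
 * Register the vmwgfx_gem_info debugfs file on the primary minor. Compiles
 * to a no-op when CONFIG_DEBUG_FS is not set.
 */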
void vmw_debugfs_gem_init(struct vmw_private *vdev)
{
#if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = vdev->drm.primary;
        struct dentry *root = minor->debugfs_root;

        debugfs_create_file("vmwgfx_gem_info", 0444, root, vdev,
                            &vmw_debugfs_gem_info_fops);
#endif
}