/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"

#include "drm/drm_prime.h"
#include "drm/drm_gem_ttm_helper.h"

#include <linux/debugfs.h>

static void vmw_gem_object_free(struct drm_gem_object *gobj)
{
	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);

	ttm_bo_put(bo);
}

static int vmw_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file_priv)
{
	return 0;
}

static void vmw_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file_priv)
{
}

static int vmw_gem_object_pin(struct drm_gem_object *obj)
{
	struct vmw_bo *vbo = to_vmw_bo(obj);

	vmw_bo_pin_reserved(vbo, true);

	return 0;
}

static void vmw_gem_object_unpin(struct drm_gem_object *obj)
{
	struct vmw_bo *vbo = to_vmw_bo(obj);

	vmw_bo_pin_reserved(vbo, false);
}

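/*
 * Return the scatter/gather table backing this object for PRIME export.
 * Reuse the table the TTM backend already built (vsgt.sgt) when there is
 * one; otherwise build a fresh table from the TTM page array.
 */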
static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
{
	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

	if (vmw_tt->vsgt.sgt)
		return vmw_tt->vsgt.sgt;

	return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
}

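/*
 * Kernel-map the object. Imported dma-bufs are mapped through the
 * exporter and rejected if the mapping turns out to be I/O memory;
 * native objects are mapped through TTM.
 */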
static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
	int ret;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				ret = -EIO;
			}
		}
	} else {
		ret = ttm_bo_vmap(bo, map);
	}

	return ret;
}

static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	if (obj->import_attach)
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	else
		drm_gem_ttm_vunmap(obj, map);
}

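/*
 * Set up a userspace mapping. Imported dma-bufs are redirected to
 * dma_buf_mmap() so the exporter controls the mapping; everything else
 * goes through the generic GEM/TTM mmap helper.
 */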
static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	int ret;

	if (obj->import_attach) {
		/*
		 * Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	return drm_gem_ttm_mmap(obj, vma);
}

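/*
 * VM operations for mmap'ed objects: faults and write-notifications go
 * through the vmwgfx-specific handlers, open/close through the generic
 * TTM helpers.
 */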
static const struct vm_operations_struct vmw_vm_ops = {
	.pfn_mkwrite = vmw_bo_vm_mkwrite,
	.page_mkwrite = vmw_bo_vm_mkwrite,
	.fault = vmw_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
};

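/*
 * GEM object function table shared by all vmwgfx buffer objects; it is
 * installed on every BO created through vmw_gem_object_create() and
 * vmw_prime_import_sg_table().
 */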
static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
	.free = vmw_gem_object_free,
	.open = vmw_gem_object_open,
	.close = vmw_gem_object_close,
	.print_info = drm_gem_ttm_print_info,
	.pin = vmw_gem_object_pin,
	.unpin = vmw_gem_object_unpin,
	.get_sg_table = vmw_gem_object_get_sg_table,
	.vmap = vmw_gem_vmap,
	.vunmap = vmw_gem_vunmap,
	.mmap = vmw_gem_mmap,
	.vm_ops = &vmw_vm_ops,
};

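/*
 * vmw_gem_object_create - Create a vmwgfx buffer object and install the
 * GEM object funcs so it can be handled as a GEM object.
 */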
int vmw_gem_object_create(struct vmw_private *vmw,
			  struct vmw_bo_params *params,
			  struct vmw_bo **p_vbo)
{
	int ret = vmw_bo_create(vmw, params, p_vbo);

	if (ret != 0)
		goto out_no_bo;

	(*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
out_no_bo:
	return ret;
}

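/*
 * vmw_gem_object_create_with_handle - Create a GEM-backed buffer object
 * and register a handle for it in the supplied drm_file.
 */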
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
				      struct drm_file *filp,
				      uint32_t size,
				      uint32_t *handle,
				      struct vmw_bo **p_vbo)
{
	int ret;
	struct vmw_bo_params params = {
		.domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
		.busy_domain = VMW_BO_DOMAIN_SYS,
		.bo_type = ttm_bo_type_device,
		.size = size,
		.pin = false
	};

	ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
	if (ret != 0)
		goto out_no_bo;

	ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle);
out_no_bo:
	return ret;
}

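/*
 * vmw_prime_import_sg_table - Wrap an imported dma-buf scatter/gather
 * table in a vmwgfx buffer object so it can be used like a native BO.
 */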
struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *table)
{
	int ret;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_gem_object *gem = NULL;
	struct vmw_bo *vbo;
	struct vmw_bo_params params = {
		.domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
		.busy_domain = VMW_BO_DOMAIN_SYS,
		.bo_type = ttm_bo_type_sg,
		.size = attach->dmabuf->size,
		.pin = false,
		.resv = attach->dmabuf->resv,
		.sg = table,
	};

	dma_resv_lock(params.resv, NULL);

	ret = vmw_bo_create(dev_priv, &params, &vbo);
	if (ret != 0)
		goto out_no_bo;

	vbo->tbo.base.funcs = &vmw_gem_object_funcs;

	gem = &vbo->tbo.base;
out_no_bo:
	dma_resv_unlock(params.resv);
	return gem;
}

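/*
 * Ioctl handler that allocates a GEM buffer object on behalf of
 * userspace and reports back its handle and mmap offset.
 */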
int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_bo *vbo;
	uint32_t handle;
	int ret;

	ret = vmw_gem_object_create_with_handle(dev_priv, filp,
						req->size, &handle, &vbo);
	if (ret)
		goto out_no_bo;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&vbo->tbo.base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&vbo->tbo.base);
out_no_bo:
	return ret;
}

#if defined(CONFIG_DEBUG_FS)

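/*
 * vmw_bo_print_info - Emit a one-line debugfs summary (size, placement,
 * type, refcounts) for a single buffer object.
 */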
static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m)
{
	const char *placement;
	const char *type;

	switch (bo->tbo.resource->mem_type) {
	case TTM_PL_SYSTEM:	placement = " CPU"; break;
	case VMW_PL_GMR:	placement = " GMR"; break;
	case VMW_PL_MOB:	placement = " MOB"; break;
	case VMW_PL_SYSTEM:	placement = "VCPU"; break;
	case TTM_PL_VRAM:	placement = "VRAM"; break;
	default:		placement = "None"; break;
	}

	switch (bo->tbo.type) {
	case ttm_bo_type_device:	type = "device"; break;
	case ttm_bo_type_kernel:	type = "kernel"; break;
	case ttm_bo_type_sg:		type = "sg    "; break;
	default:			type = "none  "; break;
	}

	seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
		   id, bo->tbo.base.size, placement, type);
	seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
		   bo->tbo.priority,
		   bo->tbo.pin_count,
		   kref_read(&bo->tbo.base.refcount),
		   kref_read(&bo->tbo.kref));
	seq_puts(m, "\n");
}

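/*
 * vmw_debugfs_gem_info_show - Walk every DRM client of the device and
 * dump each of its GEM handles via vmw_bo_print_info().
 */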
static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct vmw_private *vdev = (struct vmw_private *)m->private;
	struct drm_device *dev = &vdev->drm;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;
		struct drm_gem_object *gobj;
		struct pid *pid;
		int id;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		pid = rcu_dereference(file->pid);
		task = pid_task(pid, PIDTYPE_TGID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, id) {
			struct vmw_bo *bo = to_vmw_bo(gobj);

			vmw_bo_print_info(id, bo, m);
		}
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(vmw_debugfs_gem_info);

#endif

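/*
 * vmw_debugfs_gem_init - Register the "vmwgfx_gem_info" debugfs file on
 * the primary DRM minor; a no-op when CONFIG_DEBUG_FS is disabled.
 */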
void vmw_debugfs_gem_init(struct vmw_private *vdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = vdev->drm.primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("vmwgfx_gem_info", 0444, root, vdev,
			    &vmw_debugfs_gem_info_fops);
#endif
}