drivers/gpu/drm/vmwgfx/ttm_object.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2009-2023 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *      Dave Airlie <[email protected]>
 *      Rob Clark <[email protected]>
 */
/** @file ttm_object.c
 *
 * Base- and reference-object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */


#define pr_fmt(fmt) "[TTM] " fmt

#include "ttm_object.h"
#include "vmwgfx_drv.h"

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/hashtable.h>

MODULE_IMPORT_NS(DMA_BUF);

#define VMW_TTM_OBJECT_REF_HT_ORDER 10

/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash table.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash table of ref objects, for fast lookup of
 * ref objects given a base object.
 *
 * @refcount: reference/usage count
 */
struct ttm_object_file {
        struct ttm_object_device *tdev;
        spinlock_t lock;
        struct list_head ref_list;
        DECLARE_HASHTABLE(ref_hash, VMW_TTM_OBJECT_REF_HT_ORDER);
        struct kref refcount;
};

/*
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the idr.
 *
 * @ops: dma_buf ops for prime objects, with the release method
 * hooked by this code.
 *
 * @dmabuf_release: The driver-provided dma_buf release method,
 * called after our own cleanup.
 *
 * @idr: idr mapping handles to base objects.
 *
 * This is the per-device data structure needed for ttm object management.
 */
struct ttm_object_device {
        spinlock_t object_lock;
        struct dma_buf_ops ops;
        void (*dmabuf_release)(struct dma_buf *dma_buf);
        struct idr idr;
};

/*
 * struct ttm_ref_object
 *
 * @rcu_head: RCU head used to free the ref object after a grace period.
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @tfile: The ttm_object_file holding this reference.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */
struct ttm_ref_object {
        struct rcu_head rcu_head;
        struct vmwgfx_hash_item hash;
        struct list_head head;
        struct kref kref;
        struct ttm_base_object *obj;
        struct ttm_object_file *tfile;
};

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
        kref_get(&tfile->refcount);
        return tfile;
}

static int ttm_tfile_find_ref_rcu(struct ttm_object_file *tfile,
                                  uint64_t key,
                                  struct vmwgfx_hash_item **p_hash)
{
        struct vmwgfx_hash_item *hash;

        hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key) {
                if (hash->key == key) {
                        *p_hash = hash;
                        return 0;
                }
        }
        return -EINVAL;
}

static int ttm_tfile_find_ref(struct ttm_object_file *tfile,
                              uint64_t key,
                              struct vmwgfx_hash_item **p_hash)
{
        struct vmwgfx_hash_item *hash;

        hash_for_each_possible(tfile->ref_hash, hash, head, key) {
                if (hash->key == key) {
                        *p_hash = hash;
                        return 0;
                }
        }
        return -EINVAL;
}

static void ttm_object_file_destroy(struct kref *kref)
{
        struct ttm_object_file *tfile =
                container_of(kref, struct ttm_object_file, refcount);

        kfree(tfile);
}

static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        kref_put(&tfile->refcount, ttm_object_file_destroy);
}

int ttm_base_object_init(struct ttm_object_file *tfile,
                         struct ttm_base_object *base,
                         bool shareable,
                         enum ttm_object_type object_type,
                         void (*refcount_release) (struct ttm_base_object **))
{
        struct ttm_object_device *tdev = tfile->tdev;
        int ret;

        base->shareable = shareable;
        base->tfile = ttm_object_file_ref(tfile);
        base->refcount_release = refcount_release;
        base->object_type = object_type;
        kref_init(&base->refcount);
        idr_preload(GFP_KERNEL);
        spin_lock(&tdev->object_lock);
        ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
        spin_unlock(&tdev->object_lock);
        idr_preload_end();
        if (ret < 0)
                return ret;

        base->handle = ret;
        ret = ttm_ref_object_add(tfile, base, NULL, false);
        if (unlikely(ret != 0))
                goto out_err1;

        ttm_base_object_unref(&base);

        return 0;
out_err1:
        spin_lock(&tdev->object_lock);
        idr_remove(&tdev->idr, base->handle);
        spin_unlock(&tdev->object_lock);
        return ret;
}

static void ttm_release_base(struct kref *kref)
{
        struct ttm_base_object *base =
            container_of(kref, struct ttm_base_object, refcount);
        struct ttm_object_device *tdev = base->tfile->tdev;

        spin_lock(&tdev->object_lock);
        idr_remove(&tdev->idr, base->handle);
        spin_unlock(&tdev->object_lock);

        /*
         * Note: We don't use synchronize_rcu() here because it's far
         * too slow. It's up to the user to free the object using
         * call_rcu() or ttm_base_object_kfree().
         */

        ttm_object_file_unref(&base->tfile);
        if (base->refcount_release)
                base->refcount_release(&base);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;

        *p_base = NULL;

        kref_put(&base->refcount, ttm_release_base);
}

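/*
 * Editorial usage sketch (not part of the driver): a typical embedding of
 * a base object, honoring the RCU-deferred free contract noted in
 * ttm_release_base() above. The vmw_example type, vmw_example_release()
 * and vmw_example_new() are hypothetical, ttm_driver_type0 is only a
 * stand-in object type, and error unwinding is simplified; just
 * ttm_base_object_init(), ttm_base_object_kfree() and
 * ttm_base_object_unref() are the real APIs here.
 */
#if 0
struct vmw_example {
        struct ttm_base_object base;
        /* driver-private payload would live here */
};

static void vmw_example_release(struct ttm_base_object **p_base)
{
        struct vmw_example *ex =
                container_of(*p_base, struct vmw_example, base);

        *p_base = NULL;
        /* Frees the embedding object after an RCU grace period. */
        ttm_base_object_kfree(ex, base);
}

static int vmw_example_new(struct ttm_object_file *tfile, u32 *handle)
{
        struct vmw_example *ex = kzalloc(sizeof(*ex), GFP_KERNEL);
        int ret;

        if (!ex)
                return -ENOMEM;

        ret = ttm_base_object_init(tfile, &ex->base, false,
                                   ttm_driver_type0, vmw_example_release);
        if (ret) {
                kfree(ex);
                return ret;
        }

        /*
         * The object is now kept alive by the ref object that
         * ttm_base_object_init() added to tfile; the initial kref was
         * already dropped inside that call.
         */
        *handle = ex->base.handle;
        return 0;
}
#endif
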
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint64_t key)
{
        struct ttm_base_object *base = NULL;
        struct vmwgfx_hash_item *hash;
        int ret;

        spin_lock(&tfile->lock);
        ret = ttm_tfile_find_ref(tfile, key, &hash);

        if (likely(ret == 0)) {
                base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
        spin_unlock(&tfile->lock);

        return base;
}

struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key)
{
        struct ttm_base_object *base;

        rcu_read_lock();
        base = idr_find(&tdev->idr, key);

        if (base && !kref_get_unless_zero(&base->refcount))
                base = NULL;
        rcu_read_unlock();

        return base;
}

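/*
 * Editorial sketch (not part of the driver): the two lookup paths differ in
 * scope. ttm_base_object_lookup() only finds objects the given file already
 * references, while ttm_base_object_lookup_for_ref() searches the whole
 * device so the caller can subsequently take a reference. The helper below
 * is hypothetical.
 */
#if 0
static int vmw_example_use_handle(struct ttm_object_file *tfile, u32 handle)
{
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (!base)
                return -ENOENT;        /* no ref held by this file */

        /* ... use the object under the acquired kref ... */

        ttm_base_object_unref(&base);  /* also NULLs the pointer */
        return 0;
}
#endif
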
int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
                       bool *existed,
                       bool require_existed)
{
        struct ttm_ref_object *ref;
        struct vmwgfx_hash_item *hash;
        int ret = -EINVAL;

        if (base->tfile != tfile && !base->shareable)
                return -EPERM;

        if (existed != NULL)
                *existed = true;

        while (ret == -EINVAL) {
                rcu_read_lock();
                ret = ttm_tfile_find_ref_rcu(tfile, base->handle, &hash);

                if (ret == 0) {
                        ref = hlist_entry(hash, struct ttm_ref_object, hash);
                        if (kref_get_unless_zero(&ref->kref)) {
                                rcu_read_unlock();
                                break;
                        }
                }

                rcu_read_unlock();
                if (require_existed)
                        return -EPERM;

                ref = kmalloc(sizeof(*ref), GFP_KERNEL);
                if (unlikely(ref == NULL))
                        return -ENOMEM;

                ref->hash.key = base->handle;
                ref->obj = base;
                ref->tfile = tfile;
                kref_init(&ref->kref);

                spin_lock(&tfile->lock);
                hash_add_rcu(tfile->ref_hash, &ref->hash.head, ref->hash.key);
                ret = 0;

                list_add_tail(&ref->head, &tfile->ref_list);
                kref_get(&base->refcount);
                spin_unlock(&tfile->lock);
                if (existed != NULL)
                        *existed = false;
        }

        return ret;
}

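/*
 * Editorial sketch (not part of the driver): how callers typically use the
 * existed / require_existed parameters of ttm_ref_object_add(). With
 * require_existed == true the call only succeeds if this file already holds
 * a reference, which makes it usable as a permission check. The function
 * names below are hypothetical.
 */
#if 0
static int vmw_example_ref(struct ttm_object_file *tfile,
                           struct ttm_base_object *base)
{
        bool existed;
        int ret;

        ret = ttm_ref_object_add(tfile, base, &existed, false);
        if (ret)
                return ret;

        if (!existed) {
                /* First reference from this file; a fresh ref object
                 * was created and hashed. */
        }
        return 0;
}

static int vmw_example_check_ref(struct ttm_object_file *tfile,
                                 struct ttm_base_object *base)
{
        /* Fails with -EPERM unless tfile already references base. */
        return ttm_ref_object_add(tfile, base, NULL, true);
}
#endif
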
static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
        struct ttm_ref_object *ref =
            container_of(kref, struct ttm_ref_object, kref);
        struct ttm_object_file *tfile = ref->tfile;

        hash_del_rcu(&ref->hash.head);
        list_del(&ref->head);
        spin_unlock(&tfile->lock);

        ttm_base_object_unref(&ref->obj);
        kfree_rcu(ref, rcu_head);
        spin_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
                              unsigned long key)
{
        struct ttm_ref_object *ref;
        struct vmwgfx_hash_item *hash;
        int ret;

        spin_lock(&tfile->lock);
        ret = ttm_tfile_find_ref(tfile, key, &hash);
        if (unlikely(ret != 0)) {
                spin_unlock(&tfile->lock);
                return -EINVAL;
        }
        ref = hlist_entry(hash, struct ttm_ref_object, hash);
        kref_put(&ref->kref, ttm_ref_object_release);
        spin_unlock(&tfile->lock);
        return 0;
}

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
        struct ttm_ref_object *ref;
        struct list_head *list;
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        spin_lock(&tfile->lock);

        /*
         * Since we release the lock within the loop, we have to
         * restart it from the beginning each time.
         */

        while (!list_empty(&tfile->ref_list)) {
                list = tfile->ref_list.next;
                ref = list_entry(list, struct ttm_ref_object, head);
                ttm_ref_object_release(&ref->kref);
        }

        spin_unlock(&tfile->lock);

        ttm_object_file_unref(&tfile);
}

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev)
{
        struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);

        if (unlikely(tfile == NULL))
                return NULL;

        spin_lock_init(&tfile->lock);
        tfile->tdev = tdev;
        kref_init(&tfile->refcount);
        INIT_LIST_HEAD(&tfile->ref_list);

        hash_init(tfile->ref_hash);

        return tfile;
}

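/*
 * Editorial sketch (not part of the driver): the per-file object lifecycle.
 * A tfile is created when a client opens the device and torn down on file
 * close, which drops every reference the file still holds. The open/close
 * hooks below are hypothetical.
 */
#if 0
static int vmw_example_open(struct ttm_object_device *tdev,
                            struct ttm_object_file **out)
{
        struct ttm_object_file *tfile = ttm_object_file_init(tdev);

        if (!tfile)
                return -ENOMEM;
        *out = tfile;
        return 0;
}

static void vmw_example_close(struct ttm_object_file **p_tfile)
{
        /* Releases all refs held by the file, then drops the tfile. */
        ttm_object_file_release(p_tfile);
}
#endif
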
struct ttm_object_device *
ttm_object_device_init(const struct dma_buf_ops *ops)
{
        struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);

        if (unlikely(tdev == NULL))
                return NULL;

        spin_lock_init(&tdev->object_lock);

        /*
         * Our base is at VMWGFX_NUM_MOB + 1 because we want to create
         * a separate namespace for GEM handles (which are
         * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctls
         * can take either handle as an argument so we want to
         * easily be able to tell whether the handle refers to a
         * GEM buffer or a surface.
         */
        idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1);
        tdev->ops = *ops;
        tdev->dmabuf_release = tdev->ops.release;
        tdev->ops.release = ttm_prime_dmabuf_release;
        return tdev;
}

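/*
 * Editorial sketch (not part of the driver): the split handle namespace
 * described above makes classification a simple range check. This helper
 * is hypothetical.
 */
#if 0
static bool vmw_example_handle_is_gem(u32 handle)
{
        /* GEM handles occupy 1..VMWGFX_NUM_MOB; surfaces start above. */
        return handle >= 1 && handle <= VMWGFX_NUM_MOB;
}
#endif
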
void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
        struct ttm_object_device *tdev = *p_tdev;

        *p_tdev = NULL;

        WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
        idr_destroy(&tdev->idr);

        kfree(tdev);
}

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma-buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
        return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct ttm_prime_object *prime;

        *p_base = NULL;
        prime = container_of(base, struct ttm_prime_object, base);
        BUG_ON(prime->dma_buf != NULL);
        mutex_destroy(&prime->mutex);
        if (prime->refcount_release)
                prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The dma-buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
        struct ttm_prime_object *prime =
                (struct ttm_prime_object *) dma_buf->priv;
        struct ttm_base_object *base = &prime->base;
        struct ttm_object_device *tdev = base->tfile->tdev;

        if (tdev->dmabuf_release)
                tdev->dmabuf_release(dma_buf);
        mutex_lock(&prime->mutex);
        if (prime->dma_buf == dma_buf)
                prime->dma_buf = NULL;
        mutex_unlock(&prime->mutex);
        ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
                           int fd, u32 *handle)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct dma_buf *dma_buf;
        struct ttm_prime_object *prime;
        struct ttm_base_object *base;
        int ret;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        if (dma_buf->ops != &tdev->ops) {
                /* Drop the reference taken by dma_buf_get() before bailing. */
                dma_buf_put(dma_buf);
                return -ENOSYS;
        }

        prime = (struct ttm_prime_object *) dma_buf->priv;
        base = &prime->base;
        *handle = base->handle;
        ret = ttm_ref_object_add(tfile, base, NULL, false);

        dma_buf_put(dma_buf);

        return ret;
}

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
                           uint32_t handle, uint32_t flags,
                           int *prime_fd)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct ttm_base_object *base;
        struct dma_buf *dma_buf;
        struct ttm_prime_object *prime;
        int ret;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL ||
                     base->object_type != ttm_prime_type)) {
                ret = -ENOENT;
                goto out_unref;
        }

        prime = container_of(base, struct ttm_prime_object, base);
        if (unlikely(!base->shareable)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = mutex_lock_interruptible(&prime->mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTARTSYS;
                goto out_unref;
        }

        dma_buf = prime->dma_buf;
        if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
                exp_info.ops = &tdev->ops;
                exp_info.size = prime->size;
                exp_info.flags = flags;
                exp_info.priv = prime;

                /*
                 * Need to create a new dma_buf
                 */

                dma_buf = dma_buf_export(&exp_info);
                if (IS_ERR(dma_buf)) {
                        ret = PTR_ERR(dma_buf);
                        mutex_unlock(&prime->mutex);
                        goto out_unref;
                }

                /*
                 * dma_buf has taken the base object reference
                 */
                base = NULL;
                prime->dma_buf = dma_buf;
        }
        mutex_unlock(&prime->mutex);

        ret = dma_buf_fd(dma_buf, flags);
        if (ret >= 0) {
                *prime_fd = ret;
                ret = 0;
        } else {
                dma_buf_put(dma_buf);
        }

out_unref:
        if (base)
                ttm_base_object_unref(&base);
        return ret;
}

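/*
 * Editorial sketch (not part of the driver): a prime handle/fd round trip
 * from the export side. Error handling is abbreviated and the function name
 * is hypothetical; O_CLOEXEC is a typical flag for dma-buf fds.
 */
#if 0
static int vmw_example_share(struct ttm_object_file *tfile, u32 handle)
{
        u32 handle_again;
        int fd, ret;

        /* Export: prime object handle -> dma-buf fd, to hand to a peer. */
        ret = ttm_prime_handle_to_fd(tfile, handle, O_CLOEXEC, &fd);
        if (ret)
                return ret;

        /* Import (same device only): fd -> handle, adds a ref for tfile. */
        return ttm_prime_fd_to_handle(tfile, fd, &handle_again);
}
#endif
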
/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
                          struct ttm_prime_object *prime,
                          enum ttm_object_type type,
                          void (*refcount_release) (struct ttm_base_object **))
{
        bool shareable = (type == VMW_RES_SURFACE);

        mutex_init(&prime->mutex);
        prime->size = PAGE_ALIGN(size);
        prime->real_type = type;
        prime->dma_buf = NULL;
        prime->refcount_release = refcount_release;
        return ttm_base_object_init(tfile, &prime->base, shareable,
                                    ttm_prime_type,
                                    ttm_prime_refcount_release);
}
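
/*
 * Editorial sketch (not part of the driver): initializing a prime object
 * embedded in a driver resource so it can later be exported with
 * ttm_prime_handle_to_fd(). The vmw_example_res type and its helpers are
 * hypothetical; error unwinding is simplified.
 */
#if 0
struct vmw_example_res {
        struct ttm_prime_object prime;
        /* driver-private payload would live here */
};

static void vmw_example_res_release(struct ttm_base_object **p_base)
{
        struct vmw_example_res *res =
                container_of(*p_base, struct vmw_example_res, prime.base);

        *p_base = NULL;
        /* Defers the free by an RCU grace period, as required above. */
        ttm_base_object_kfree(res, prime.base);
}

static int vmw_example_res_new(struct ttm_object_file *tfile, size_t size)
{
        struct vmw_example_res *res = kzalloc(sizeof(*res), GFP_KERNEL);

        if (!res)
                return -ENOMEM;
        /* VMW_RES_SURFACE makes the resulting base object shareable. */
        return ttm_prime_object_init(tfile, size, &res->prime,
                                     VMW_RES_SURFACE,
                                     vmw_example_res_release);
}
#endif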