/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
 * persistent objects that contain an optional fence. The fence can be
 * replaced with a new fence, or set to NULL.
 *
 * A syncobj can be waited upon, in which case the wait is satisfied by the
 * underlying fence.
 *
 * A syncobj can be exported to a file descriptor and imported back again.
 * These file descriptors are opaque and have no use other than passing the
 * syncobj between processes.
 *
 * Their primary use-case is to implement Vulkan fences and semaphores.
 *
 * A syncobj has a kref reference count, but also has an optional file.
 * The file is only created once the syncobj is exported, and it takes a
 * reference on the kref.
 */
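
/*
 * A minimal driver-side usage sketch (illustrative only; error handling is
 * trimmed, "file_private" is the caller's &struct drm_file and "job_fence"
 * is a hypothetical &struct dma_fence produced by the driver):
 *
 *      struct drm_syncobj *syncobj;
 *      u32 handle;
 *      int ret;
 *
 *      ret = drm_syncobj_create(&syncobj, 0, NULL);
 *      if (ret)
 *              return ret;
 *
 *      ret = drm_syncobj_get_handle(file_private, syncobj, &handle);
 *      if (!ret)
 *              drm_syncobj_replace_fence(syncobj, job_fence);
 *
 *      drm_syncobj_put(syncobj);
 */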

#include <drm/drmP.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
#include <linux/sched/signal.h>

#include "drm_internal.h"
#include <drm/drm_syncobj.h>

/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
                                     u32 handle)
{
        struct drm_syncobj *syncobj;

        spin_lock(&file_private->syncobj_table_lock);

        /* Check if we currently have a reference on the object */
        syncobj = idr_find(&file_private->syncobj_idr, handle);
        if (syncobj)
                drm_syncobj_get(syncobj);

        spin_unlock(&file_private->syncobj_table_lock);

        return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
                                            struct drm_syncobj_cb *cb,
                                            drm_syncobj_func_t func)
{
        cb->func = func;
        list_add_tail(&cb->node, &syncobj->cb_list);
}

static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
                                                 struct dma_fence **fence,
                                                 struct drm_syncobj_cb *cb,
                                                 drm_syncobj_func_t func)
{
        int ret;

        *fence = drm_syncobj_fence_get(syncobj);
        if (*fence)
                return 1;

        spin_lock(&syncobj->lock);
        /* We've already tried once to get a fence and failed.  Now that we
         * have the lock, try one more time just to be sure we don't add a
         * callback when a fence has already been set.
         */
        if (syncobj->fence) {
                *fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
                                                                 lockdep_is_held(&syncobj->lock)));
                ret = 1;
        } else {
                *fence = NULL;
                drm_syncobj_add_callback_locked(syncobj, cb, func);
                ret = 0;
        }
        spin_unlock(&syncobj->lock);

        return ret;
}

/**
 * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
 * @syncobj: Sync object to which to add the callback
 * @cb: Callback to add
 * @func: Func to use when initializing the drm_syncobj_cb struct
 *
 * This adds a callback to be called next time the fence is replaced
 */
void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
                              struct drm_syncobj_cb *cb,
                              drm_syncobj_func_t func)
{
        spin_lock(&syncobj->lock);
        drm_syncobj_add_callback_locked(syncobj, cb, func);
        spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_add_callback);

/**
 * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list
 * @syncobj: Sync object from which to remove the callback
 * @cb: Callback to remove
 */
void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
                                 struct drm_syncobj_cb *cb)
{
        spin_lock(&syncobj->lock);
        list_del_init(&cb->node);
        spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_remove_callback);

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
                               struct dma_fence *fence)
{
        struct dma_fence *old_fence;
        struct drm_syncobj_cb *cur, *tmp;

        if (fence)
                dma_fence_get(fence);

        spin_lock(&syncobj->lock);

        old_fence = rcu_dereference_protected(syncobj->fence,
                                              lockdep_is_held(&syncobj->lock));
        rcu_assign_pointer(syncobj->fence, fence);

        if (fence != old_fence) {
                list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
                        list_del_init(&cur->node);
                        cur->func(syncobj, cur);
                }
        }

        spin_unlock(&syncobj->lock);

        dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
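
/*
 * A sketch of the "signal" side of a driver's submit path (illustrative
 * only; "args->out_syncobj" and "job->done_fence" are hypothetical driver
 * names):
 *
 *      struct drm_syncobj *out;
 *
 *      out = drm_syncobj_find(file_private, args->out_syncobj);
 *      if (!out)
 *              return -ENOENT;
 *
 *      drm_syncobj_replace_fence(out, job->done_fence);
 *      drm_syncobj_put(out);
 */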

struct drm_syncobj_null_fence {
        struct dma_fence base;
        spinlock_t lock;
};

static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
{
        return "syncobjnull";
}

static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
{
        dma_fence_enable_sw_signaling(fence);
        return !dma_fence_is_signaled(fence);
}

static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
        .get_driver_name = drm_syncobj_null_fence_get_name,
        .get_timeline_name = drm_syncobj_null_fence_get_name,
        .enable_signaling = drm_syncobj_null_fence_enable_signaling,
        .wait = dma_fence_default_wait,
        .release = NULL,
};

static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
        struct drm_syncobj_null_fence *fence;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (fence == NULL)
                return -ENOMEM;

        spin_lock_init(&fence->lock);
        dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops,
                       &fence->lock, 0, 0);
        dma_fence_signal(&fence->base);

        drm_syncobj_replace_fence(syncobj, &fence->base);

        dma_fence_put(&fence->base);

        return 0;
}

/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
                           u32 handle,
                           struct dma_fence **fence)
{
        struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
        int ret = 0;

        if (!syncobj)
                return -ENOENT;

        *fence = drm_syncobj_fence_get(syncobj);
        if (!*fence)
                ret = -EINVAL;

        drm_syncobj_put(syncobj);
        return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
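
/*
 * A sketch of the matching "wait" side (illustrative only; how the fence is
 * consumed, for example as a scheduler dependency, is driver specific, and
 * "args->in_syncobj" is a hypothetical name):
 *
 *      struct dma_fence *in_fence;
 *      int ret;
 *
 *      ret = drm_syncobj_find_fence(file_private, args->in_syncobj, &in_fence);
 *      if (ret)
 *              return ret;
 *
 *      ret = dma_fence_wait(in_fence, true);
 *      dma_fence_put(in_fence);
 *      return ret;
 */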

/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
        struct drm_syncobj *syncobj = container_of(kref,
                                                   struct drm_syncobj,
                                                   refcount);
        drm_syncobj_replace_fence(syncobj, NULL);
        kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the first function to create a sync object. After creating, drivers
 * probably want to make it available to userspace, either through
 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
                       struct dma_fence *fence)
{
        int ret;
        struct drm_syncobj *syncobj;

        syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
        if (!syncobj)
                return -ENOMEM;

        kref_init(&syncobj->refcount);
        INIT_LIST_HEAD(&syncobj->cb_list);
        spin_lock_init(&syncobj->lock);

        if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
                ret = drm_syncobj_assign_null_handle(syncobj);
                if (ret < 0) {
                        drm_syncobj_put(syncobj);
                        return ret;
                }
        }

        if (fence)
                drm_syncobj_replace_fence(syncobj, fence);

        *out_syncobj = syncobj;
        return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);
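
/*
 * A short sketch of creating a syncobj that starts out signaled, mirroring
 * what the CREATE ioctl does for DRM_SYNCOBJ_CREATE_SIGNALED; the new
 * syncobj carries the pre-signaled stub fence installed by
 * drm_syncobj_assign_null_handle() (illustrative only):
 *
 *      struct drm_syncobj *syncobj;
 *      int ret;
 *
 *      ret = drm_syncobj_create(&syncobj, DRM_SYNCOBJ_CREATE_SIGNALED, NULL);
 *      if (ret)
 *              return ret;
 *      drm_syncobj_put(syncobj);
 */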

/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
                           struct drm_syncobj *syncobj, u32 *handle)
{
        int ret;

        /* take a reference to put in the idr */
        drm_syncobj_get(syncobj);

        idr_preload(GFP_KERNEL);
        spin_lock(&file_private->syncobj_table_lock);
        ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
        spin_unlock(&file_private->syncobj_table_lock);

        idr_preload_end();

        if (ret < 0) {
                drm_syncobj_put(syncobj);
                return ret;
        }

        *handle = ret;
        return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
                                        u32 *handle, uint32_t flags)
{
        int ret;
        struct drm_syncobj *syncobj;

        ret = drm_syncobj_create(&syncobj, flags, NULL);
        if (ret)
                return ret;

        ret = drm_syncobj_get_handle(file_private, syncobj, handle);
        drm_syncobj_put(syncobj);
        return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
                               u32 handle)
{
        struct drm_syncobj *syncobj;

        spin_lock(&file_private->syncobj_table_lock);
        syncobj = idr_remove(&file_private->syncobj_idr, handle);
        spin_unlock(&file_private->syncobj_table_lock);

        if (!syncobj)
                return -EINVAL;

        drm_syncobj_put(syncobj);
        return 0;
}

static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
        struct drm_syncobj *syncobj = file->private_data;

        drm_syncobj_put(syncobj);
        return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
        .release = drm_syncobj_file_release,
};

static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
{
        struct file *file = anon_inode_getfile("syncobj_file",
                                               &drm_syncobj_file_fops,
                                               syncobj, 0);
        if (IS_ERR(file))
                return PTR_ERR(file);

        drm_syncobj_get(syncobj);
        if (cmpxchg(&syncobj->file, NULL, file)) {
                /* lost the race */
                fput(file);
        }

        return 0;
}

/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
        int ret;
        int fd;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        if (!syncobj->file) {
                ret = drm_syncobj_alloc_file(syncobj);
                if (ret) {
                        put_unused_fd(fd);
                        return ret;
                }
        }
        fd_install(fd, syncobj->file);
        *p_fd = fd;
        return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
                                    u32 handle, int *p_fd)
{
        struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
        int ret;

        if (!syncobj)
                return -EINVAL;

        ret = drm_syncobj_get_fd(syncobj, p_fd);
        drm_syncobj_put(syncobj);
        return ret;
}

static struct drm_syncobj *drm_syncobj_fdget(int fd)
{
        struct file *file = fget(fd);

        if (!file)
                return NULL;
        if (file->f_op != &drm_syncobj_file_fops)
                goto err;

        return file->private_data;
err:
        fput(file);
        return NULL;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
                                    int fd, u32 *handle)
{
        struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
        int ret;

        if (!syncobj)
                return -EINVAL;

        /* take a reference to put in the idr */
        drm_syncobj_get(syncobj);

        idr_preload(GFP_KERNEL);
        spin_lock(&file_private->syncobj_table_lock);
        ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
        spin_unlock(&file_private->syncobj_table_lock);
        idr_preload_end();

        if (ret < 0) {
                fput(syncobj->file);
                return ret;
        }
        *handle = ret;
        return 0;
}

static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
                                              int fd, int handle)
{
        struct dma_fence *fence = sync_file_get_fence(fd);
        struct drm_syncobj *syncobj;

        if (!fence)
                return -EINVAL;

        syncobj = drm_syncobj_find(file_private, handle);
        if (!syncobj) {
                dma_fence_put(fence);
                return -ENOENT;
        }

        drm_syncobj_replace_fence(syncobj, fence);
        dma_fence_put(fence);
        drm_syncobj_put(syncobj);
        return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
                                        int handle, int *p_fd)
{
        int ret;
        struct dma_fence *fence;
        struct sync_file *sync_file;
        int fd = get_unused_fd_flags(O_CLOEXEC);

        if (fd < 0)
                return fd;

        ret = drm_syncobj_find_fence(file_private, handle, &fence);
        if (ret)
                goto err_put_fd;

        sync_file = sync_file_create(fence);

        dma_fence_put(fence);

        if (!sync_file) {
                ret = -EINVAL;
                goto err_put_fd;
        }

        fd_install(fd, sync_file->file);

        *p_fd = fd;
        return 0;
err_put_fd:
        put_unused_fd(fd);
        return ret;
}

/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
        idr_init(&file_private->syncobj_idr);
        spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
        struct drm_syncobj *syncobj = ptr;

        drm_syncobj_put(syncobj);
        return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
        idr_for_each(&file_private->syncobj_idr,
                     &drm_syncobj_release_handle, file_private);
        idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_private)
{
        struct drm_syncobj_create *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -ENODEV;

        /* DRM_SYNCOBJ_CREATE_SIGNALED is the only valid flag */
        if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
                return -EINVAL;

        return drm_syncobj_create_as_handle(file_private,
                                            &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_private)
{
        struct drm_syncobj_destroy *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -ENODEV;

        /* make sure padding is empty */
        if (args->pad)
                return -EINVAL;
        return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_private)
{
        struct drm_syncobj_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -ENODEV;

        if (args->pad)
                return -EINVAL;

        if (args->flags != 0 &&
            args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
                return -EINVAL;

        if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
                return drm_syncobj_export_sync_file(file_private, args->handle,
                                                    &args->fd);

        return drm_syncobj_handle_to_fd(file_private, args->handle,
                                        &args->fd);
}
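
/*
 * A userspace-side sketch of this ioctl (illustrative only; drmIoctl() is
 * the libdrm wrapper, and "drm_fd" and "handle" are hypothetical):
 *
 *      struct drm_syncobj_handle args = {
 *              .handle = handle,
 *              .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *      };
 *
 *      if (drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args))
 *              return -errno;
 *      sync_file_fd = args.fd;
 *
 * Without the flag the ioctl returns an opaque syncobj fd instead, only
 * usable with DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE in the importing process.
 */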

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_private)
{
        struct drm_syncobj_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -ENODEV;

        if (args->pad)
                return -EINVAL;

        if (args->flags != 0 &&
            args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
                return -EINVAL;

        if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
                return drm_syncobj_import_sync_file_fence(file_private,
                                                          args->fd,
                                                          args->handle);

        return drm_syncobj_fd_to_handle(file_private, args->fd,
                                        &args->handle);
}

struct syncobj_wait_entry {
        struct task_struct *task;
        struct dma_fence *fence;
        struct dma_fence_cb fence_cb;
        struct drm_syncobj_cb syncobj_cb;
};

static void syncobj_wait_fence_func(struct dma_fence *fence,
                                    struct dma_fence_cb *cb)
{
        struct syncobj_wait_entry *wait =
                container_of(cb, struct syncobj_wait_entry, fence_cb);

        wake_up_process(wait->task);
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
                                      struct drm_syncobj_cb *cb)
{
        struct syncobj_wait_entry *wait =
                container_of(cb, struct syncobj_wait_entry, syncobj_cb);

        /* This happens inside the syncobj lock */
        wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
                                                              lockdep_is_held(&syncobj->lock)));
        wake_up_process(wait->task);
}

static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
                                                  uint32_t count,
                                                  uint32_t flags,
                                                  signed long timeout,
                                                  uint32_t *idx)
{
        struct syncobj_wait_entry *entries;
        struct dma_fence *fence;
        signed long ret;
        uint32_t signaled_count, i;

        entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        /* Walk the list of sync objects and initialize entries.  We do
         * this up-front so that we can properly return -EINVAL if there is
         * a syncobj with a missing fence and then never have the chance of
         * returning -EINVAL again.
         */
        signaled_count = 0;
        for (i = 0; i < count; ++i) {
                entries[i].task = current;
                entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
                if (!entries[i].fence) {
                        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                                continue;
                        } else {
                                ret = -EINVAL;
                                goto cleanup_entries;
                        }
                }

                if (dma_fence_is_signaled(entries[i].fence)) {
                        if (signaled_count == 0 && idx)
                                *idx = i;
                        signaled_count++;
                }
        }

        /* Initialize ret to the max of timeout and 1.  That way, the
         * default return value indicates a successful wait and not a
         * timeout.
         */
        ret = max_t(signed long, timeout, 1);

        if (signaled_count == count ||
            (signaled_count > 0 &&
             !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
                goto cleanup_entries;

        /* There's a very annoying laxness in the dma_fence API here, in
         * that backends are not required to automatically report when a
         * fence is signaled prior to fence->ops->enable_signaling() being
         * called.  So here if we fail to match signaled_count, we need to
         * fall through and try a 0 timeout wait!
         */

        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                for (i = 0; i < count; ++i) {
                        drm_syncobj_fence_get_or_add_callback(syncobjs[i],
                                                              &entries[i].fence,
                                                              &entries[i].syncobj_cb,
                                                              syncobj_wait_syncobj_func);
                }
        }

        do {
                set_current_state(TASK_INTERRUPTIBLE);

                signaled_count = 0;
                for (i = 0; i < count; ++i) {
                        fence = entries[i].fence;
                        if (!fence)
                                continue;

                        if (dma_fence_is_signaled(fence) ||
                            (!entries[i].fence_cb.func &&
                             dma_fence_add_callback(fence,
                                                    &entries[i].fence_cb,
                                                    syncobj_wait_fence_func))) {
                                /* The fence has been signaled */
                                if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
                                        signaled_count++;
                                } else {
                                        if (idx)
                                                *idx = i;
                                        goto done_waiting;
                                }
                        }
                }

                if (signaled_count == count)
                        goto done_waiting;

                if (timeout == 0) {
                        /* If we are doing a 0 timeout wait and we got
                         * here, then we just timed out.
                         */
                        ret = 0;
                        goto done_waiting;
                }

                ret = schedule_timeout(ret);

                if (ret > 0 && signal_pending(current))
                        ret = -ERESTARTSYS;
        } while (ret > 0);

done_waiting:
        __set_current_state(TASK_RUNNING);

cleanup_entries:
        for (i = 0; i < count; ++i) {
                if (entries[i].syncobj_cb.func)
                        drm_syncobj_remove_callback(syncobjs[i],
                                                    &entries[i].syncobj_cb);
                if (entries[i].fence_cb.func)
                        dma_fence_remove_callback(entries[i].fence,
                                                  &entries[i].fence_cb);
                dma_fence_put(entries[i].fence);
        }
        kfree(entries);

        return ret;
}

/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: absolute timeout in ns, 0 for poll
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
        ktime_t abs_timeout, now;
        u64 timeout_ns, timeout_jiffies64;

        /* a 0 timeout means poll - absolute 0 doesn't seem valid */
        if (timeout_nsec == 0)
                return 0;

        abs_timeout = ns_to_ktime(timeout_nsec);
        now = ktime_get();

        if (!ktime_after(abs_timeout, now))
                return 0;

        timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

        timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
        /* clamp timeout to avoid infinite timeout */
        if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies64 + 1;
}
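
/*
 * Worked example: with HZ == 1000 (one jiffy per millisecond), an absolute
 * timeout 20ms in the future gives timeout_ns of roughly 20,000,000, which
 * nsecs_to_jiffies64() converts to 20 jiffies; the function then returns 21,
 * so that the rounding down inside the conversion can never cause the wait
 * to end before the requested deadline.
 */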

static int drm_syncobj_array_wait(struct drm_device *dev,
                                  struct drm_file *file_private,
                                  struct drm_syncobj_wait *wait,
                                  struct drm_syncobj **syncobjs)
{
        signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
        signed long ret = 0;
        uint32_t first = ~0;

        ret = drm_syncobj_array_wait_timeout(syncobjs,
                                             wait->count_handles,
                                             wait->flags,
                                             timeout, &first);
        if (ret < 0)
                return ret;

        wait->first_signaled = first;
        if (ret == 0)
                return -ETIME;
        return 0;
}

static int drm_syncobj_array_find(struct drm_file *file_private,
                                  void __user *user_handles,
                                  uint32_t count_handles,
                                  struct drm_syncobj ***syncobjs_out)
{
        uint32_t i, *handles;
        struct drm_syncobj **syncobjs;
        int ret;

        handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
        if (handles == NULL)
                return -ENOMEM;

        if (copy_from_user(handles, user_handles,
                           sizeof(uint32_t) * count_handles)) {
                ret = -EFAULT;
                goto err_free_handles;
        }

        syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
        if (syncobjs == NULL) {
                ret = -ENOMEM;
                goto err_free_handles;
        }

        for (i = 0; i < count_handles; i++) {
                syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
                if (!syncobjs[i]) {
                        ret = -ENOENT;
                        goto err_put_syncobjs;
                }
        }

        kfree(handles);
        *syncobjs_out = syncobjs;
        return 0;

err_put_syncobjs:
        while (i-- > 0)
                drm_syncobj_put(syncobjs[i]);
        kfree(syncobjs);
err_free_handles:
        kfree(handles);

        return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
                                   uint32_t count)
{
        uint32_t i;

        for (i = 0; i < count; i++)
                drm_syncobj_put(syncobjs[i]);
        kfree(syncobjs);
}

int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_private)
{
        struct drm_syncobj_wait *args = data;
        struct drm_syncobj **syncobjs;
        int ret = 0;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -ENODEV;

        if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
                            DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        ret = drm_syncobj_array_wait(dev, file_private,
                                     args, syncobjs);

        drm_syncobj_array_free(syncobjs, args->count_handles);

        return ret;
}
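
/*
 * A userspace-side sketch of waiting on two syncobjs until both have
 * signaled, with an absolute timeout one second from now (illustrative
 * only; "drm_fd", "handle_a" and "handle_b" are hypothetical, and
 * drmIoctl() is the libdrm wrapper):
 *
 *      uint32_t handles[2] = { handle_a, handle_b };
 *      struct timespec ts;
 *      struct drm_syncobj_wait wait = {
 *              .handles = (uintptr_t)handles,
 *              .count_handles = 2,
 *              .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
 *      };
 *
 *      clock_gettime(CLOCK_MONOTONIC, &ts);
 *      wait.timeout_nsec = ts.tv_sec * 1000000000ll + ts.tv_nsec + 1000000000ll;
 *
 *      if (drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait))
 *              return -errno;
 *
 * An ETIME errno means the timeout expired first; the timeout is absolute
 * CLOCK_MONOTONIC time in nanoseconds, matching the ktime_get() base used
 * by drm_timeout_abs_to_jiffies() above.
 */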

int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_private)
{
        struct drm_syncobj_array *args = data;
        struct drm_syncobj **syncobjs;
        uint32_t i;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -ENODEV;

        if (args->pad != 0)
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        for (i = 0; i < args->count_handles; i++)
                drm_syncobj_replace_fence(syncobjs[i], NULL);

        drm_syncobj_array_free(syncobjs, args->count_handles);

        return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_private)
{
        struct drm_syncobj_array *args = data;
        struct drm_syncobj **syncobjs;
        uint32_t i;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -ENODEV;

        if (args->pad != 0)
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        for (i = 0; i < args->count_handles; i++) {
                ret = drm_syncobj_assign_null_handle(syncobjs[i]);
                if (ret < 0)
                        break;
        }

        drm_syncobj_array_free(syncobjs, args->count_handles);

        return ret;
}