/*
 * drivers/staging/android/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"

static const struct fence_ops android_fence_ops;
static const struct file_operations sync_fence_fops;

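/**
 * sync_timeline_create() - creates a sync object
 * @ops:	specialization ops
 * @size:	size to allocate for this object; must be at least
 *		sizeof(struct sync_timeline) so drivers can embed it
 * @name:	sync_timeline name
 *
 * Creates a new sync_timeline with a fresh fence context.  Returns the
 * new timeline, or NULL on allocation failure or if @size is too small.
 */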
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
                                           int size, const char *name)
{
        struct sync_timeline *obj;

        if (size < sizeof(struct sync_timeline))
                return NULL;

        obj = kzalloc(size, GFP_KERNEL);
        if (!obj)
                return NULL;

        kref_init(&obj->kref);
        obj->ops = ops;
        obj->context = fence_context_alloc(1);
        strlcpy(obj->name, name, sizeof(obj->name));

        INIT_LIST_HEAD(&obj->child_list_head);
        INIT_LIST_HEAD(&obj->active_list_head);
        spin_lock_init(&obj->child_list_lock);

        sync_timeline_debug_add(obj);

        return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct kref *kref)
{
        struct sync_timeline *obj =
                container_of(kref, struct sync_timeline, kref);

        sync_timeline_debug_remove(obj);

        if (obj->ops->release_obj)
                obj->ops->release_obj(obj);

        kfree(obj);
}

static void sync_timeline_get(struct sync_timeline *obj)
{
        kref_get(&obj->kref);
}

static void sync_timeline_put(struct sync_timeline *obj)
{
        kref_put(&obj->kref, sync_timeline_free);
}

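/**
 * sync_timeline_destroy() - destroys a sync object
 * @obj:	sync_timeline to destroy
 *
 * Marks @obj as destroyed, signals any remaining children and drops the
 * creator's reference.  The timeline itself is freed only once every
 * outstanding sync_pt has dropped its reference.
 */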
void sync_timeline_destroy(struct sync_timeline *obj)
{
        obj->destroyed = true;
        /*
         * Ensure timeline is marked as destroyed before
         * changing timeline's fences status.
         */
        smp_wmb();

        /*
         * signal any children that their parent is going away.
         */
        sync_timeline_signal(obj);
        sync_timeline_put(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

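/**
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj:	sync_timeline to signal
 *
 * Called by the driver after the timeline's value has advanced; walks
 * the active list and drops every sync_pt whose fence now reports
 * signaled.
 */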
void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        struct sync_pt *pt, *next;

        trace_sync_timeline(obj);

        spin_lock_irqsave(&obj->child_list_lock, flags);

        list_for_each_entry_safe(pt, next, &obj->active_list_head,
                                 active_list) {
                if (fence_is_signaled_locked(&pt->base))
                        list_del_init(&pt->active_list);
        }

        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
EXPORT_SYMBOL(sync_timeline_signal);

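/**
 * sync_pt_create() - creates a sync pt
 * @obj:	parent sync_timeline
 * @size:	size to allocate for this pt; must be at least
 *		sizeof(struct sync_pt)
 *
 * Creates a new sync_pt as a child of @obj, taking a reference on the
 * timeline and assigning the next sequence number on its fence context.
 * Returns the new sync_pt, or NULL on failure.
 */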
struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
{
        unsigned long flags;
        struct sync_pt *pt;

        if (size < sizeof(struct sync_pt))
                return NULL;

        pt = kzalloc(size, GFP_KERNEL);
        if (!pt)
                return NULL;

        spin_lock_irqsave(&obj->child_list_lock, flags);
        sync_timeline_get(obj);
        fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
                   obj->context, ++obj->value);
        list_add_tail(&pt->child_list, &obj->child_list_head);
        INIT_LIST_HEAD(&pt->active_list);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
        return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
        fence_put(&pt->base);
}
EXPORT_SYMBOL(sync_pt_free);

static struct sync_fence *sync_fence_alloc(int size, const char *name)
{
        struct sync_fence *fence;

        fence = kzalloc(size, GFP_KERNEL);
        if (!fence)
                return NULL;

        fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
                                         fence, 0);
        if (IS_ERR(fence->file))
                goto err;

        kref_init(&fence->kref);
        strlcpy(fence->name, name, sizeof(fence->name));

        init_waitqueue_head(&fence->wq);

        return fence;

err:
        kfree(fence);
        return NULL;
}

static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
{
        struct sync_fence_cb *check;
        struct sync_fence *fence;

        check = container_of(cb, struct sync_fence_cb, cb);
        fence = check->fence;

        if (atomic_dec_and_test(&fence->status))
                wake_up_all(&fence->wq);
}

/* TODO: implement a create which takes more than one sync_pt */
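/**
 * sync_fence_create_dma() - creates a sync fence from a single dma fence
 * @name:	name of the fence to create
 * @pt:	fence to wrap in the new sync fence
 *
 * Wraps @pt in a one-entry sync_fence backed by an anonymous file; the
 * new fence takes over the caller's reference on @pt.  Returns the new
 * fence, or NULL on allocation failure.
 */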
struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt)
{
        struct sync_fence *fence;

        fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
        if (!fence)
                return NULL;

        fence->num_fences = 1;
        atomic_set(&fence->status, 1);

        fence->cbs[0].sync_pt = pt;
        fence->cbs[0].fence = fence;
        if (fence_add_callback(pt, &fence->cbs[0].cb, fence_check_cb_func))
                atomic_dec(&fence->status);

        sync_fence_debug_add(fence);

        return fence;
}
EXPORT_SYMBOL(sync_fence_create_dma);

struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
        return sync_fence_create_dma(name, &pt->base);
}
EXPORT_SYMBOL(sync_fence_create);

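/**
 * sync_fence_fdget() - get a fence from an fd
 * @fd:	fd referencing a fence
 *
 * Ensures @fd references a valid sync_fence and takes a file reference
 * that must be dropped with sync_fence_put().  Returns the fence, or
 * NULL on error.
 */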
struct sync_fence *sync_fence_fdget(int fd)
{
        struct file *file = fget(fd);

        if (!file)
                return NULL;

        if (file->f_op != &sync_fence_fops)
                goto err;

        return file->private_data;

err:
        fput(file);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static void sync_fence_add_pt(struct sync_fence *fence,
                              int *i, struct fence *pt)
{
        fence->cbs[*i].sync_pt = pt;
        fence->cbs[*i].fence = fence;

        if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
                fence_get(pt);
                (*i)++;
        }
}

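/**
 * sync_fence_merge() - merge two sync fences
 * @name:	name of new fence
 * @a:	fence a
 * @b:	fence b
 *
 * Creates a new fence containing copies of all the sync_pts in both
 * @a and @b.  Where both fences carry a point on the same fence
 * context, only the more recent point (by seqno) is kept, so e.g.
 * merging {ctx1:5, ctx2:3} with {ctx1:7} yields {ctx1:7, ctx2:3}.
 * Returns the merged fence, or NULL on error.
 */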
struct sync_fence *sync_fence_merge(const char *name,
                                    struct sync_fence *a, struct sync_fence *b)
{
        int num_fences = a->num_fences + b->num_fences;
        struct sync_fence *fence;
        int i, i_a, i_b;
        unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);

        fence = sync_fence_alloc(size, name);
        if (!fence)
                return NULL;

        atomic_set(&fence->status, num_fences);

        /*
         * Assume sync_fence a and b are both ordered and have no
         * duplicates with the same context.
         *
         * If a sync_fence can only be created with sync_fence_merge
         * and sync_fence_create, this is a reasonable assumption.
         */
        for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
                struct fence *pt_a = a->cbs[i_a].sync_pt;
                struct fence *pt_b = b->cbs[i_b].sync_pt;

                if (pt_a->context < pt_b->context) {
                        sync_fence_add_pt(fence, &i, pt_a);

                        i_a++;
                } else if (pt_a->context > pt_b->context) {
                        sync_fence_add_pt(fence, &i, pt_b);

                        i_b++;
                } else {
                        /* Same context: keep the more recent point,
                         * using wraparound-safe seqno comparison. */
                        if (pt_a->seqno - pt_b->seqno <= INT_MAX)
                                sync_fence_add_pt(fence, &i, pt_a);
                        else
                                sync_fence_add_pt(fence, &i, pt_b);

                        i_a++;
                        i_b++;
                }
        }

        for (; i_a < a->num_fences; i_a++)
                sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);

        for (; i_b < b->num_fences; i_b++)
                sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);

        if (num_fences > i)
                atomic_sub(num_fences - i, &fence->status);
        fence->num_fences = i;

        sync_fence_debug_add(fence);
        return fence;
}
EXPORT_SYMBOL(sync_fence_merge);

int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
                          int wake_flags, void *key)
{
        struct sync_fence_waiter *wait;

        wait = container_of(curr, struct sync_fence_waiter, work);
        list_del_init(&wait->work.task_list);

        wait->callback(wait->work.private, wait);
        return 1;
}

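/**
 * sync_fence_wait_async() - registers an async wait on the fence
 * @fence:	fence to wait on
 * @waiter:	waiter callback struct
 *
 * Returns 1 if @fence has already signaled, a negative error if it is
 * in an error state, and 0 if the waiter was queued; in the queued
 * case @waiter->callback runs once the fence signals.
 */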
int sync_fence_wait_async(struct sync_fence *fence,
                          struct sync_fence_waiter *waiter)
{
        int err = atomic_read(&fence->status);
        unsigned long flags;

        if (err < 0)
                return err;

        if (!err)
                return 1;

        init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
        waiter->work.private = fence;

        spin_lock_irqsave(&fence->wq.lock, flags);
        err = atomic_read(&fence->status);
        if (err > 0)
                __add_wait_queue_tail(&fence->wq, &waiter->work);
        spin_unlock_irqrestore(&fence->wq.lock, flags);

        if (err < 0)
                return err;

        return !err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

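/**
 * sync_fence_cancel_async() - cancels an async wait
 * @fence:	fence the wait was registered on
 * @waiter:	waiter callback struct
 *
 * Removes a previously registered waiter.  Returns 0 if the waiter was
 * removed before it fired, or -ENOENT if it had already run or was
 * never queued.
 */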
int sync_fence_cancel_async(struct sync_fence *fence,
                            struct sync_fence_waiter *waiter)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&fence->wq.lock, flags);
        if (!list_empty(&waiter->work.task_list))
                list_del_init(&waiter->work.task_list);
        else
                ret = -ENOENT;
        spin_unlock_irqrestore(&fence->wq.lock, flags);
        return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

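/**
 * sync_fence_wait() - wait on fence
 * @fence:	fence to wait on
 * @timeout:	timeout in ms, or a negative value to wait indefinitely
 *
 * Sleeps until @fence signals, the timeout expires (-ETIME), or the
 * wait is interrupted.  Returns the fence's negative error status if
 * it signaled with an error, otherwise 0.
 */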
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        long ret;
        int i;

        if (timeout < 0)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else
                timeout = msecs_to_jiffies(timeout);

        trace_sync_wait(fence, 1);
        for (i = 0; i < fence->num_fences; ++i)
                trace_sync_pt(fence->cbs[i].sync_pt);
        ret = wait_event_interruptible_timeout(fence->wq,
                                               atomic_read(&fence->status) <= 0,
                                               timeout);
        trace_sync_wait(fence, 0);

        if (ret < 0) {
                return ret;
        } else if (ret == 0) {
                if (timeout) {
                        pr_info("fence timeout on [%p] after %dms\n", fence,
                                jiffies_to_msecs(timeout));
                        sync_dump();
                }
                return -ETIME;
        }

        ret = atomic_read(&fence->status);
        if (ret) {
                pr_info("fence error %ld on [%p]\n", ret, fence);
                sync_dump();
        }
        return ret;
}
EXPORT_SYMBOL(sync_fence_wait);

static const char *android_fence_get_driver_name(struct fence *fence)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);

        return parent->ops->driver_name;
}

static const char *android_fence_get_timeline_name(struct fence *fence)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);

        return parent->name;
}

static void android_fence_release(struct fence *fence)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);
        unsigned long flags;

        spin_lock_irqsave(fence->lock, flags);
        list_del(&pt->child_list);
        if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
                list_del(&pt->active_list);
        spin_unlock_irqrestore(fence->lock, flags);

        if (parent->ops->free_pt)
                parent->ops->free_pt(pt);

        sync_timeline_put(parent);
        fence_free(&pt->base);
}

static bool android_fence_signaled(struct fence *fence)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);
        int ret;

        ret = parent->ops->has_signaled(pt);
        if (ret < 0)
                fence->status = ret;
        return ret;
}

static bool android_fence_enable_signaling(struct fence *fence)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);

        if (android_fence_signaled(fence))
                return false;

        list_add_tail(&pt->active_list, &parent->active_list_head);
        return true;
}

static int android_fence_fill_driver_data(struct fence *fence,
                                          void *data, int size)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);

        if (!parent->ops->fill_driver_data)
                return 0;
        return parent->ops->fill_driver_data(pt, data, size);
}

static void android_fence_value_str(struct fence *fence,
                                    char *str, int size)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);

        if (!parent->ops->pt_value_str) {
                if (size)
                        *str = 0;
                return;
        }
        parent->ops->pt_value_str(pt, str, size);
}

static void android_fence_timeline_value_str(struct fence *fence,
                                             char *str, int size)
{
        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
        struct sync_timeline *parent = sync_pt_parent(pt);

        if (!parent->ops->timeline_value_str) {
                if (size)
                        *str = 0;
                return;
        }
        parent->ops->timeline_value_str(parent, str, size);
}

static const struct fence_ops android_fence_ops = {
        .get_driver_name = android_fence_get_driver_name,
        .get_timeline_name = android_fence_get_timeline_name,
        .enable_signaling = android_fence_enable_signaling,
        .signaled = android_fence_signaled,
        .wait = fence_default_wait,
        .release = android_fence_release,
        .fill_driver_data = android_fence_fill_driver_data,
        .fence_value_str = android_fence_value_str,
        .timeline_value_str = android_fence_timeline_value_str,
};

static void sync_fence_free(struct kref *kref)
{
        struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
        int i;

        for (i = 0; i < fence->num_fences; ++i) {
                fence_remove_callback(fence->cbs[i].sync_pt, &fence->cbs[i].cb);
                fence_put(fence->cbs[i].sync_pt);
        }

        kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
        struct sync_fence *fence = file->private_data;

        sync_fence_debug_remove(fence);

        kref_put(&fence->kref, sync_fence_free);
        return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
        struct sync_fence *fence = file->private_data;
        int status;

        poll_wait(file, &fence->wq, wait);

        status = atomic_read(&fence->status);

        if (!status)
                return POLLIN;
        else if (status < 0)
                return POLLERR;
        return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
        __s32 value;

        if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
                return -EFAULT;

        return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
        int fd = get_unused_fd_flags(O_CLOEXEC);
        int err;
        struct sync_fence *fence2, *fence3;
        struct sync_merge_data data;

        if (fd < 0)
                return fd;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fd;
        }

        fence2 = sync_fence_fdget(data.fd2);
        if (!fence2) {
                err = -ENOENT;
                goto err_put_fd;
        }

        data.name[sizeof(data.name) - 1] = '\0';
        fence3 = sync_fence_merge(data.name, fence, fence2);
        if (!fence3) {
                err = -ENOMEM;
                goto err_put_fence2;
        }

        data.fence = fd;
        if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fence3;
        }

        sync_fence_install(fence3, fd);
        sync_fence_put(fence2);
        return 0;

err_put_fence3:
        sync_fence_put(fence3);

err_put_fence2:
        sync_fence_put(fence2);

err_put_fd:
        put_unused_fd(fd);
        return err;
}

static int sync_fill_pt_info(struct fence *fence, void *data, int size)
{
        struct sync_pt_info *info = data;
        int ret;

        if (size < sizeof(struct sync_pt_info))
                return -ENOMEM;

        info->len = sizeof(struct sync_pt_info);

        if (fence->ops->fill_driver_data) {
                ret = fence->ops->fill_driver_data(fence, info->driver_data,
                                                   size - sizeof(*info));
                if (ret < 0)
                        return ret;

                info->len += ret;
        }

        strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
                sizeof(info->obj_name));
        strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
                sizeof(info->driver_name));
        if (fence_is_signaled(fence))
                info->status = fence->status >= 0 ? 1 : fence->status;
        else
                info->status = 0;
        info->timestamp_ns = ktime_to_ns(fence->timestamp);

        return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
                                        unsigned long arg)
{
        struct sync_fence_info_data *data;
        __u32 size;
        __u32 len = 0;
        int ret, i;

        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(struct sync_fence_info_data))
                return -EINVAL;

        if (size > 4096)
                size = 4096;

        data = kzalloc(size, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        strlcpy(data->name, fence->name, sizeof(data->name));
        data->status = atomic_read(&fence->status);
        if (data->status >= 0)
                data->status = !data->status;

        len = sizeof(struct sync_fence_info_data);

        for (i = 0; i < fence->num_fences; ++i) {
                struct fence *pt = fence->cbs[i].sync_pt;

                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

                if (ret < 0)
                        goto out;

                len += ret;
        }

        data->len = len;

        if (copy_to_user((void __user *)arg, data, len))
                ret = -EFAULT;
        else
                ret = 0;

out:
        kfree(data);

        return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct sync_fence *fence = file->private_data;

        switch (cmd) {
        case SYNC_IOC_WAIT:
                return sync_fence_ioctl_wait(fence, arg);

        case SYNC_IOC_MERGE:
                return sync_fence_ioctl_merge(fence, arg);

        case SYNC_IOC_FENCE_INFO:
                return sync_fence_ioctl_fence_info(fence, arg);

        default:
                return -ENOTTY;
        }
}

static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
        .compat_ioctl = sync_fence_ioctl,
};

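/*
 * Example usage (illustrative sketch, not part of the driver): how a
 * driver built on this API might hand a fence fd to userspace.  The
 * "my_timeline_ops" sync_timeline_ops implementation (which must at
 * least provide has_signaled) and the omitted error handling are
 * hypothetical placeholders.
 *
 *	struct sync_timeline *tl;
 *	struct sync_pt *pt;
 *	struct sync_fence *fence;
 *	int fd;
 *
 *	tl = sync_timeline_create(&my_timeline_ops,
 *				  sizeof(struct sync_timeline), "my_tl");
 *	pt = sync_pt_create(tl, sizeof(struct sync_pt));
 *	fence = sync_fence_create("my_fence", pt);
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	sync_fence_install(fence, fd);
 *
 * When the hardware work completes, the driver advances its timeline
 * value and calls sync_timeline_signal(tl).  Userspace can then block
 * on the fd with ioctl(fd, SYNC_IOC_WAIT, &timeout_ms), where
 * timeout_ms is an __s32 in milliseconds (negative waits forever), or
 * poll() the fd for POLLIN.
 */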