drivers/gpu/drm/scheduler/sched_fence.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/gpu_scheduler.h>

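/*
 * One shared slab for all scheduler fences; a single object embeds both
 * the scheduled and the finished dma_fence.
 */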
static struct kmem_cache *sched_fence_slab;

static int __init drm_sched_fence_slab_init(void)
{
        sched_fence_slab = kmem_cache_create(
                "drm_sched_fence", sizeof(struct drm_sched_fence), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!sched_fence_slab)
                return -ENOMEM;

        return 0;
}

static void __exit drm_sched_fence_slab_fini(void)
{
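        /*
         * Fences are freed through call_rcu(); wait for all pending RCU
         * callbacks so nothing is returned to the slab after it is
         * destroyed.
         */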
        rcu_barrier();
        kmem_cache_destroy(sched_fence_slab);
}

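/* Signals the scheduled fence: the job has been pushed to the hardware. */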
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
        dma_fence_signal(&fence->scheduled);
}

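/* Signals the finished fence: the hardware has completed the job. */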
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
        dma_fence_signal(&fence->finished);
}

static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
        return "drm_sched";
}

static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
        struct drm_sched_fence *fence = to_drm_sched_fence(f);

        return fence->sched->name;
}

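/*
 * dma_fence pointers may still be dereferenced under RCU (e.g. via
 * dma_fence_get_rcu_safe()), so the actual free is deferred until after a
 * grace period.
 */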
static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
{
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
        struct drm_sched_fence *fence = to_drm_sched_fence(f);

        if (!WARN_ON_ONCE(!fence))
                kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_free - free up an uninitialized fence
 *
 * @fence: fence to free
 *
 * Free up the fence memory. Should only be used if drm_sched_fence_init()
 * has not been called yet.
 */
void drm_sched_fence_free(struct drm_sched_fence *fence)
{
        /* This function should not be called if the fence has been initialized. */
        if (!WARN_ON_ONCE(fence->sched))
                kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_release_scheduled - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
        struct drm_sched_fence *fence = to_drm_sched_fence(f);

        dma_fence_put(fence->parent);
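        /*
         * Both fences are embedded in the same drm_sched_fence, so the
         * rcu_head of the finished fence can be borrowed to free the
         * whole object once.
         */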
        call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
}

/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
        struct drm_sched_fence *fence = to_drm_sched_fence(f);

        dma_fence_put(&fence->scheduled);
}

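/*
 * dma-fence deadline hint: remember the earliest requested deadline and
 * forward it to the hardware fence once a parent exists.
 */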
static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
                                                  ktime_t deadline)
{
        struct drm_sched_fence *fence = to_drm_sched_fence(f);
        struct dma_fence *parent;
        unsigned long flags;

        spin_lock_irqsave(&fence->lock, flags);

        /* If we already have an earlier deadline, keep it: */
        if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
            ktime_before(fence->deadline, deadline)) {
                spin_unlock_irqrestore(&fence->lock, flags);
                return;
        }

        fence->deadline = deadline;
        set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);

        spin_unlock_irqrestore(&fence->lock, flags);

        /*
         * smp_load_acquire() to ensure that if we are racing another
         * thread calling drm_sched_fence_set_parent(), that we see
         * the parent set before it calls test_bit(HAS_DEADLINE_BIT)
         */
        parent = smp_load_acquire(&fence->parent);
        if (parent)
                dma_fence_set_deadline(parent, deadline);
}

static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
        .get_driver_name = drm_sched_fence_get_driver_name,
        .get_timeline_name = drm_sched_fence_get_timeline_name,
        .release = drm_sched_fence_release_scheduled,
};

static const struct dma_fence_ops drm_sched_fence_ops_finished = {
        .get_driver_name = drm_sched_fence_get_driver_name,
        .get_timeline_name = drm_sched_fence_get_timeline_name,
        .release = drm_sched_fence_release_finished,
        .set_deadline = drm_sched_fence_set_deadline_finished,
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
        if (f->ops == &drm_sched_fence_ops_scheduled)
                return container_of(f, struct drm_sched_fence, scheduled);

        if (f->ops == &drm_sched_fence_ops_finished)
                return container_of(f, struct drm_sched_fence, finished);

        return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);
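
/*
 * Illustrative use, not part of this file: a caller holding an arbitrary
 * dma_fence f can test whether it originates from the scheduler:
 *
 *      struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
 *
 *      if (s_fence)
 *              pr_debug("scheduler fence, owner %p\n", s_fence->owner);
 *
 * A NULL return means f was created by some other fence provider.
 */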

void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
                                struct dma_fence *fence)
{
        /*
         * smp_store_release() to ensure another thread racing us
         * in drm_sched_fence_set_deadline_finished() sees the
         * fence's parent set before test_bit()
         */
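        /*
         * This release pairs with the acquire load of the parent in
         * drm_sched_fence_set_deadline_finished(); the intent is that at
         * least one of the two racing paths observes the other's update,
         * so a deadline set concurrently with the parent is still
         * propagated.
         */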
        smp_store_release(&s_fence->parent, dma_fence_get(fence));
        if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
                     &s_fence->finished.flags))
                dma_fence_set_deadline(fence, s_fence->deadline);
}

struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
                                              void *owner)
{
        struct drm_sched_fence *fence;

        fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
        if (!fence)
                return NULL;

        fence->owner = owner;
        spin_lock_init(&fence->lock);

        return fence;
}

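/*
 * Initialization is split from allocation so that the allocation can happen
 * in a step that may still fail (e.g. drm_sched_job_init()), while the
 * fences are only armed once the submission can no longer be aborted
 * (e.g. drm_sched_job_arm()).
 */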
void drm_sched_fence_init(struct drm_sched_fence *fence,
                          struct drm_sched_entity *entity)
{
        unsigned int seq;

        fence->sched = entity->rq->sched;
        seq = atomic_inc_return(&entity->fence_seq);
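        /*
         * The entity allocates two consecutive fence contexts: the
         * scheduled fence uses the first, the finished fence the second.
         */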
        dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
                       &fence->lock, entity->fence_context, seq);
        dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
                       &fence->lock, entity->fence_context + 1, seq);
}

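/*
 * Typical lifecycle, sketched for illustration: drm_sched_fence_alloc()
 * at job creation, drm_sched_fence_init() when the job is armed,
 * drm_sched_fence_scheduled() once the job is pushed to the hardware,
 * drm_sched_fence_finished() when the hardware signals completion, and
 * the final dma_fence_put() frees the object via RCU.
 */
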
module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");