linux.git: drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <[email protected]>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

struct amdgpu_sync_entry {
        struct hlist_node       node;
        struct dma_fence        *fence;
        bool                    explicit;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
        hash_init(sync->fences);
        sync->last_vm_update = NULL;
}
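
/*
 * Illustrative sketch (not part of the original file): the typical
 * lifecycle of an amdgpu_sync object. The function name and the way the
 * fence is obtained are hypothetical; only the amdgpu_sync_* calls match
 * the API defined in this file.
 */
static int __maybe_unused amdgpu_sync_lifecycle_sketch(struct amdgpu_device *adev,
                                                       struct dma_fence *fence)
{
        struct amdgpu_sync sync;
        int r;

        amdgpu_sync_create(&sync);                      /* zero init */

        r = amdgpu_sync_fence(adev, &sync, fence, false);
        if (!r)
                r = amdgpu_sync_wait(&sync, true);      /* interruptible */

        amdgpu_sync_free(&sync);                        /* drops remaining refs */
        return r;
}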

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
                                 struct dma_fence *f)
{
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                return ring->adev == adev;
        }

        return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

        if (s_fence)
                return s_fence->owner;

        return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one
 * is later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
                                   struct dma_fence *fence)
{
        if (*keep && dma_fence_is_later(*keep, fence))
                return;

        dma_fence_put(*keep);
        *keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an
 * entry was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
        struct amdgpu_sync_entry *e;

        hash_for_each_possible(sync->fences, e, node, f->context) {
                if (unlikely(e->fence->context != f->context))
                        continue;

                amdgpu_sync_keep_later(&e->fence, f);
                return true;
        }
        return false;
}
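
/*
 * Illustrative note (not part of the original file): entries are hashed
 * and deduplicated by f->context, so adding two fences from the same
 * timeline keeps only the later one. With hypothetical fences f1 and f2
 * from one context, where f2 has the higher seqno:
 *
 *      amdgpu_sync_fence(adev, sync, f1, false);
 *      amdgpu_sync_fence(adev, sync, f2, false);
 *
 * a single entry remains, holding a reference to f2.
 */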

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add fence to
 * @f: fence to sync to
 * @explicit: true if the fence is an explicit dependency
 *
 * Add the fence to the sync object. Returns 0 on success or -ENOMEM when
 * a new hash entry could not be allocated.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                      struct dma_fence *f, bool explicit)
{
        struct amdgpu_sync_entry *e;

        if (!f)
                return 0;

        if (amdgpu_sync_same_dev(adev, f) &&
            amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
                amdgpu_sync_keep_later(&sync->last_vm_update, f);

        if (amdgpu_sync_add_later(sync, f))
                return 0;

        e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->explicit = explicit;

        hash_add(sync->fences, &e->node, f->context);
        e->fence = dma_fence_get(f);
        return 0;
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fences
 * @owner: owner used to decide which fences need to be synced to
 * @explicit_sync: true if we should only sync to the exclusive fence
 *
 * Sync to all fences in the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
                     void *owner, bool explicit_sync)
{
        struct reservation_object_list *flist;
        struct dma_fence *f;
        void *fence_owner;
        unsigned i;
        int r = 0;

        if (resv == NULL)
                return -EINVAL;

        /* always sync to the exclusive fence */
        f = reservation_object_get_excl(resv);
        r = amdgpu_sync_fence(adev, sync, f, false);

        flist = reservation_object_get_list(resv);
        if (!flist || r)
                return r;

        for (i = 0; i < flist->shared_count; ++i) {
                f = rcu_dereference_protected(flist->shared[i],
                                              reservation_object_held(resv));
                if (amdgpu_sync_same_dev(adev, f)) {
                        /* VM updates are only interesting
                         * for other VM updates and moves.
                         */
                        fence_owner = amdgpu_sync_get_owner(f);
                        if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
                            (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
                            ((owner == AMDGPU_FENCE_OWNER_VM) !=
                             (fence_owner == AMDGPU_FENCE_OWNER_VM)))
                                continue;

                        /* Ignore fences from the same owner and explicit
                         * ones as long as the owner isn't undefined.
                         */
                        if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
                            (fence_owner == owner || explicit_sync))
                                continue;
                }

                r = amdgpu_sync_fence(adev, sync, f, false);
                if (r)
                        break;
        }
        return r;
}
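
/*
 * Illustrative sketch (not part of the original file): syncing to every
 * fence attached to a buffer object's reservation object before touching
 * it. The function name is hypothetical; bo->tbo.resv is assumed to be
 * the reservation object embedded in the TTM buffer object.
 */
static int __maybe_unused amdgpu_sync_bo_sketch(struct amdgpu_device *adev,
                                                struct amdgpu_sync *sync,
                                                struct amdgpu_bo *bo)
{
        /* AMDGPU_FENCE_OWNER_UNDEFINED means: do not filter by owner */
        return amdgpu_sync_resv(adev, sync, bo->tbo.resv,
                                AMDGPU_FENCE_OWNER_UNDEFINED, false);
}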

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for the test
 *
 * Returns the next fence not signaled yet without removing it from the
 * sync object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                                         struct amdgpu_ring *ring)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                struct dma_fence *f = e->fence;
                struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

                if (dma_fence_is_signaled(f)) {
                        hash_del(&e->node);
                        dma_fence_put(f);
                        kmem_cache_free(amdgpu_sync_slab, e);
                        continue;
                }
                if (ring && s_fence) {
                        /* For fences from the same ring it is sufficient
                         * when they are scheduled.
                         */
                        if (s_fence->sched == &ring->sched) {
                                if (dma_fence_is_signaled(&s_fence->scheduled))
                                        continue;

                                return &s_fence->scheduled;
                        }
                }

                return f;
        }

        return NULL;
}

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 * @explicit: out parameter, set to whether the fence was added explicitly
 *
 * Gets and removes the next fence from the sync object that is not
 * signaled yet. The returned fence reference is owned by the caller.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct dma_fence *f;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                f = e->fence;
                if (explicit)
                        *explicit = e->explicit;

                hash_del(&e->node);
                kmem_cache_free(amdgpu_sync_slab, e);

                if (!dma_fence_is_signaled(f))
                        return f;

                dma_fence_put(f);
        }
        return NULL;
}
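
/*
 * Illustrative sketch (not part of the original file): draining a sync
 * object fence by fence, roughly how a scheduler would pull dependencies.
 * The function name is hypothetical. Note that amdgpu_sync_get_fence()
 * hands the fence reference to the caller, hence the dma_fence_put().
 */
static void __maybe_unused amdgpu_sync_drain_sketch(struct amdgpu_sync *sync)
{
        struct dma_fence *f;
        bool explicit;

        while ((f = amdgpu_sync_get_fence(sync, &explicit))) {
                /* wait for f, or install it as a scheduler dependency */
                dma_fence_put(f);
        }
}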

/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait on
 * @intr: if true, the wait is interruptible
 *
 * Wait for every fence in the sync object to signal, removing entries as
 * they complete. Returns 0 on success or a negative error code.
 */
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i, r;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                r = dma_fence_wait(e->fence, intr);
                if (r)
                        return r;

                hash_del(&e->node);
                dma_fence_put(e->fence);
                kmem_cache_free(amdgpu_sync_slab, e);
        }

        return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        unsigned i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                hash_del(&e->node);
                dma_fence_put(e->fence);
                kmem_cache_free(amdgpu_sync_slab, e);
        }

        dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
        amdgpu_sync_slab = kmem_cache_create(
                "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_sync_slab)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
        kmem_cache_destroy(amdgpu_sync_slab);
}