/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <[email protected]>
 */

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

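/*
 * One hash table entry per fence context: remembers the latest fence seen
 * for that context and whether it was added as an explicit dependency.
 */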
struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
	bool	explicit;
};

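/* Slab cache for amdgpu_sync_entry objects, set up in amdgpu_sync_init(). */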
static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

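/*
 * Typical life cycle of a sync object, as a minimal illustrative sketch
 * (error handling omitted; "adev", "resv" and "owner" stand in for the
 * caller's device, reservation object and job owner):
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, resv, owner, false);
 *	if (!r)
 *		r = amdgpu_sync_wait(&sync, true);
 *	amdgpu_sync_free(&sync);
 */
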
/**
 * amdgpu_sync_same_dev - test if fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence;
	struct amdgpu_amdkfd_fence *kfd_fence;

	if (!f)
		return AMDGPU_FENCE_OWNER_UNDEFINED;

	s_fence = to_drm_sched_fence(f);
	if (s_fence)
		return s_fence->owner;

	kfd_fence = to_amdgpu_amdkfd_fence(f);
	if (kfd_fence)
		return AMDGPU_FENCE_OWNER_KFD;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - Keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 * @explicit: whether the fence is an explicit dependency
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
				  bool explicit)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);

		/* Preserve the explicit flag so we don't lose the pipeline sync. */
		e->explicit |= explicit;

		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add fence to
 * @f: fence to sync to
 * @explicit: if this is an explicit dependency
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
		      bool explicit)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_add_later(sync, f, explicit))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->explicit = explicit;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}

/**
 * amdgpu_sync_vm_fence - remember to sync to this VM fence
 *
 * @sync: sync object to add fence to
 * @fence: the VM fence to add
 *
 * Add the fence to the sync object and remember it as VM update.
 */
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
{
	if (!fence)
		return 0;

	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
	return amdgpu_sync_fence(sync, fence, false);
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @owner: originator of the upcoming submission, used to decide which fences
 *	   to skip
 * @explicit_sync: true if we should only sync to the exclusive fence
 *
 * Sync to all relevant fences in the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct dma_resv *resv,
		     void *owner, bool explicit_sync)
{
	struct dma_resv_list *flist;
	struct dma_fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = dma_resv_get_excl(resv);
	r = amdgpu_sync_fence(sync, f, false);

	flist = dma_resv_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      dma_resv_held(resv));
		/* We only want to trigger KFD eviction fences on
		 * evict or move jobs. Skip KFD fences otherwise.
		 */
		fence_owner = amdgpu_sync_get_owner(f);
		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
			continue;

		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates only sync with moves but not with user
			 * command submissions or KFD eviction fences.
			 */
			if (owner == AMDGPU_FENCE_OWNER_VM &&
			    fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
				continue;

			/* Ignore fences from the same owner, as well as all
			 * fences when explicit sync was requested, as long as
			 * the owner isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    (fence_owner == owner || explicit_sync))
				continue;
		}

		r = amdgpu_sync_fence(sync, f, false);
		if (r)
			break;
	}
	return r;
}

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 * @explicit: out parameter, set to true if the returned fence is an explicit
 *	      dependency
 *
 * Gets and removes the next fence from the sync object that is not signaled
 * yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (explicit)
			*explicit = e->explicit;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}

/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			r = amdgpu_sync_fence(clone, f, e->explicit);
			if (r)
				return r;
		} else {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
		}
	}

	dma_fence_put(clone->last_vm_update);
	clone->last_vm_update = dma_fence_get(source->last_vm_update);

	return 0;
}

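/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait on
 * @intr: if true, the waits are interruptible
 *
 * Wait for every fence collected in the sync object, dropping each entry
 * once it has been waited on. Returns 0 on success or the error returned
 * by dma_fence_wait() otherwise.
 */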
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}