/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
33 #include "amdgpu_trace.h"
34 #include "amdgpu_amdkfd.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
	bool	explicit;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}
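
/*
 * Usage sketch (illustrative only, hypothetical caller): the typical
 * lifecycle of a sync object is create, fill, consume, free:
 *
 *	struct amdgpu_sync sync;
 *
 *	amdgpu_sync_create(&sync);
 *	... add fences with amdgpu_sync_fence() / amdgpu_sync_resv() ...
 *	... consume them with amdgpu_sync_peek_fence() or
 *	    amdgpu_sync_get_fence() ...
 *	amdgpu_sync_free(&sync);
 */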

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}
	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence;
	struct amdgpu_amdkfd_fence *kfd_fence;

	if (!f)
		return AMDGPU_FENCE_OWNER_UNDEFINED;

	s_fence = to_drm_sched_fence(f);
	if (s_fence)
		return s_fence->owner;

	kfd_fence = to_amdgpu_amdkfd_fence(f);
	if (kfd_fence)
		return AMDGPU_FENCE_OWNER_KFD;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - Keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}
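
/*
 * Note (informative): dma_fence_is_later() compares fence sequence numbers
 * and is only meaningful for fences on the same context, which is how this
 * helper is used by amdgpu_sync_add_later(). E.g. (hypothetical seqnos on
 * one context): with *keep at seqno 7 and @fence at seqno 5, *keep is left
 * alone; with @fence at seqno 9, *keep is dropped and replaced by @fence.
 */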

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 * @explicit: whether the fence is an explicit dependency
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
				  bool explicit)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);

		/* Preserve explicit flag to not lose pipeline sync */
		e->explicit |= explicit;
		return true;
	}
	return false;
}
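
/*
 * Note (informative): the hash is keyed by f->context, so at most one entry
 * exists per fence context and amdgpu_sync_keep_later() collapses multiple
 * fences from the same context into the latest one. This keeps a sync
 * object small even when many fences from the same ring are added.
 */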

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 * @explicit: true if this is an explicit dependency
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct dma_fence *f, bool explicit)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	if (amdgpu_sync_add_later(sync, f, explicit))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->explicit = explicit;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}
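
/*
 * Usage sketch (illustrative, hypothetical caller): remembering a single
 * dependency for a job, e.g. an implicit fence from a BO move:
 *
 *	r = amdgpu_sync_fence(adev, &job->sync, fence, false);
 *	if (r)
 *		return r;
 *
 * Passing true as the last argument marks the dependency as explicit, which
 * amdgpu_sync_get_fence() later reports back through its @explicit pointer.
 */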

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @owner: owner doing the sync
 * @explicit_sync: true if we should only sync to the exclusive fence
 *
 * Sync to the fence
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner, bool explicit_sync)
{
	struct reservation_object_list *flist;
	struct dma_fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f, false);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		/* We only want to trigger KFD eviction fences on
		 * evict or move jobs. Skip KFD fences otherwise.
		 */
		fence_owner = amdgpu_sync_get_owner(f);
		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
			continue;

		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner and explicit
			 * ones as long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    (fence_owner == owner || explicit_sync))
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f, false);
		if (r)
			break;
	}
	return r;
}
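
/*
 * Usage sketch (illustrative, hypothetical caller): syncing a job to all
 * relevant fences of a buffer object's reservation object:
 *
 *	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv, owner, false);
 *	if (r)
 *		return r;
 *
 * With @explicit_sync true, shared fences from the same owner are skipped
 * as well, so effectively only the exclusive fence is synced to.
 */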

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 * @explicit: true if the next fence is explicit
 *
 * Gets and removes from the sync object the next fence not signaled yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (explicit)
			*explicit = e->explicit;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}
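
/*
 * Usage sketch (illustrative, hypothetical caller): draining all pending
 * dependencies from a sync object, one fence at a time:
 *
 *	struct dma_fence *fence;
 *	bool explicit;
 *
 *	while ((fence = amdgpu_sync_get_fence(&sync, &explicit))) {
 *		... wait on or forward fence, honoring explicit ...
 *		dma_fence_put(fence);
 *	}
 *
 * The caller owns the returned reference and must drop it with
 * dma_fence_put() when done.
 */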

/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
			if (r)
				return r;
		} else {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
		}
	}

	dma_fence_put(clone->last_vm_update);
	clone->last_vm_update = dma_fence_get(source->last_vm_update);

	return 0;
}

/**
 * amdgpu_sync_wait - wait for all fences in the sync object to signal
 *
 * @sync: the sync object
 * @intr: use interruptible sleep
 */
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}
	return 0;
}
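
/*
 * Usage sketch (illustrative, hypothetical caller): blocking until every
 * remembered fence has signaled, e.g. before tearing down a queue:
 *
 *	r = amdgpu_sync_wait(&sync, true);
 *	if (r == -ERESTARTSYS)
 *		... interrupted by a signal while sleeping, bail out ...
 */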

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}
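
/*
 * Usage sketch (illustrative): amdgpu_sync_init()/amdgpu_sync_fini() pair
 * with driver module load and unload; a hypothetical call site:
 *
 *	r = amdgpu_sync_init();
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_sync_fini();
 */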

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}