/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
};

static struct kmem_cache *amdgpu_sync_slab;
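
/*
 * Rough lifecycle of a sync object, pieced together from the functions below
 * (an illustrative sketch, not copied from a specific caller):
 *
 *	struct amdgpu_sync sync;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_SYNC_NE_OWNER, owner);
 *	...
 *	r = amdgpu_sync_wait(&sync, true);
 *	amdgpu_sync_free(&sync);
 */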

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence;
	struct amdgpu_amdkfd_fence *kfd_fence;

	if (!f)
		return AMDGPU_FENCE_OWNER_UNDEFINED;

	s_fence = to_drm_sched_fence(f);
	if (s_fence)
		return s_fence->owner;

	kfd_fence = to_amdgpu_amdkfd_fence(f);
	if (kfd_fence)
		return AMDGPU_FENCE_OWNER_KFD;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}
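
/*
 * Note on the owner values, as they are treated by amdgpu_sync_resv() below:
 * AMDGPU_FENCE_OWNER_UNDEFINED marks kernel work such as buffer moves,
 * AMDGPU_FENCE_OWNER_VM marks page table updates and AMDGPU_FENCE_OWNER_KFD
 * marks KFD eviction fences.
 */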

/**
 * amdgpu_sync_keep_later - Keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending which one is later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}
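
/*
 * amdgpu_sync_keep_later() takes a reference on the fence it keeps and drops
 * the reference on the one it replaces; dma_fence_put(NULL) is a no-op, so
 * *keep may initially be NULL.
 */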

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_add_later(sync, f))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}

/**
 * amdgpu_sync_vm_fence - remember to sync to this VM fence
 *
 * @sync: sync object to add fence to
 * @fence: the VM fence to add
 *
 * Add the fence to the sync object and remember it as VM update.
 */
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
{
	if (!fence)
		return 0;

	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
	return amdgpu_sync_fence(sync, fence);
}
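
/*
 * The VM fence is tracked twice: it is added to the fence hash like any other
 * dependency and additionally remembered in sync->last_vm_update, which
 * amdgpu_sync_clone() copies and amdgpu_sync_free() releases below.
 */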

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @mode: how owner affects which fences we sync to
 * @owner: owner of the planned job submission
 *
 * Sync to the fences of the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
		     void *owner)
{
	struct dma_resv_list *flist;
	struct dma_fence *f;
	unsigned int i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = dma_resv_get_excl(resv);
	r = amdgpu_sync_fence(sync, f);

	flist = dma_resv_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		void *fence_owner;

		f = rcu_dereference_protected(flist->shared[i],
					      dma_resv_held(resv));

		fence_owner = amdgpu_sync_get_owner(f);

		/* Always sync to moves, no matter what */
		if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
			r = amdgpu_sync_fence(sync, f);
			if (r)
				break;
		}

		/* We only want to trigger KFD eviction fences on
		 * evict or move jobs. Skip KFD fences otherwise.
		 */
		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
			continue;

		/* Never sync to VM updates either. */
		if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
			continue;

		/* Ignore fences depending on the sync mode: ALWAYS adds every
		 * remaining shared fence, NE_OWNER skips fences from the same
		 * owner on this device, EQ_OWNER skips fences from other
		 * owners on this device and EXPLICIT skips all of them.
		 */
		switch (mode) {
		case AMDGPU_SYNC_ALWAYS:
			break;

		case AMDGPU_SYNC_NE_OWNER:
			if (amdgpu_sync_same_dev(adev, f) &&
			    fence_owner == owner)
				continue;
			break;

		case AMDGPU_SYNC_EQ_OWNER:
			if (amdgpu_sync_same_dev(adev, f) &&
			    fence_owner != owner)
				continue;
			break;

		case AMDGPU_SYNC_EXPLICIT:
			continue;
		}

		WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
		     "Adding eviction fence to sync obj");
		r = amdgpu_sync_fence(sync, f);
		if (r)
			break;
	}

	return r;
}

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled: the ring executes its jobs
			 * in order, so anything submitted afterwards runs
			 * behind the dependency anyway.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}
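
/*
 * In contrast to amdgpu_sync_peek_fence() above, amdgpu_sync_get_fence()
 * removes the entry and hands the fence reference that the entry held over to
 * the caller.
 */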

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Gets and removes the next fence from the sync object not signaled yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}

/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			r = amdgpu_sync_fence(clone, f);
			if (r)
				return r;
		} else {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
		}
	}

	dma_fence_put(clone->last_vm_update);
	clone->last_vm_update = dma_fence_get(source->last_vm_update);

	return 0;
}
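
/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: the sync object to wait for
 * @intr: if we should wait interruptible
 *
 * Wait for every fence in the sync object to signal, dropping the entries as
 * they complete.
 */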
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}