/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct fence		*fence;
};

static struct kmem_cache *amdgpu_sync_slab;
/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}
/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}
/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}
/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending which one is later.
 */
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
	if (*keep && fence_is_later(*keep, fence))
		return;

	fence_put(*keep);
	*keep = fence_get(fence);
}
/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return true;
	}
	return false;
}
/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	if (amdgpu_sync_add_later(sync, f))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = fence_get(f);
	return 0;
}
/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fences
 * @owner: owner of the new job, used to decide which fences can be skipped
 *
 * Sync to the exclusive fence and, depending on the owner, the shared fences.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence_owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}
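
/*
 * Illustrative sketch, not part of the driver: a typical caller collects the
 * fences of a buffer object before scheduling new work on it. The helper name
 * and the BO parameter below are hypothetical; the BO's reservation object is
 * assumed to already be locked by the caller, as amdgpu_sync_resv() requires.
 */
static void __maybe_unused amdgpu_sync_resv_example(struct amdgpu_device *adev,
						    struct amdgpu_bo *bo)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);

	/* AMDGPU_FENCE_OWNER_UNDEFINED means "sync to everything", since no
	 * shared fence may be skipped for an undefined owner.
	 */
	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv,
			     AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r)
		DRM_ERROR("failed to sync to BO reservation (%d)\n", r);

	amdgpu_sync_free(&sync);
}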
/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for the test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
				     struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct fence *f = e->fence;
		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		if (fence_is_signaled(f)) {
			hash_del(&e->node);
			fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}

		return f;
	}

	return NULL;
}
/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Get and remove the next fence from the sync object that has not
 * signaled yet.
 */
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!fence_is_signaled(f))
			return f;

		fence_put(f);
	}
	return NULL;
}
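
/*
 * Illustrative sketch, not part of the driver: draining a sync object by
 * waiting on every fence that has not signaled yet. The helper name is
 * hypothetical; fence_wait() and fence_put() are the generic fence API.
 */
static int __maybe_unused amdgpu_sync_wait_example(struct amdgpu_sync *sync)
{
	struct fence *f;
	int r;

	/* amdgpu_sync_get_fence() already removed the entry from the hash,
	 * so this loop owns the reference and must drop it again.
	 */
	while ((f = amdgpu_sync_get_fence(sync))) {
		r = fence_wait(f, false);
		fence_put(f);
		if (r)
			return r;
	}
	return 0;
}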
/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	fence_put(sync->last_vm_update);
}
/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}
/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}
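
/*
 * Illustrative sketch, hypothetical and not part of the driver: the slab
 * cache above has to exist before any sync object is used and is torn down
 * after the last one is freed, which is why amdgpu_sync_init()/fini() are
 * meant to be called from the driver's module init and exit paths.
 */
static int __maybe_unused amdgpu_sync_lifetime_example(void)
{
	int r;

	r = amdgpu_sync_init();		/* create the amdgpu_sync_entry slab */
	if (r)
		return r;

	/* ... create sync objects, add fences, wait for them, free them ... */

	amdgpu_sync_fini();		/* destroy the slab again */
	return 0;
}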