/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

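/* one entry per fence context we have synced to; kept in a hash keyed
 * by the fence context so a later fence replaces an earlier one */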
struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct fence		*fence;
};

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		sync->semaphores[i] = NULL;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		sync->sync_to[i] = NULL;

	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device the sync object belongs to
 * @sync: sync object to add fence to
 * @f: fence to sync to
 *
 * Remember the fence so it is waited on before the next submission.
 * Returns 0 on success or a negative error code.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_sync_entry *e;
	struct amdgpu_fence *fence;
	struct amdgpu_fence *other;

	if (!f)
		return 0;

	fence = to_amdgpu_fence(f);
	if (!fence || fence->ring->adev != adev) {
		/* fence from another device, keep the latest per context */
		hash_for_each_possible(sync->fences, e, node, f->context) {
			struct fence *new;

			if (unlikely(e->fence->context != f->context))
				continue;
			new = fence_get(fence_later(e->fence, f));
			if (new) {
				fence_put(e->fence);
				e->fence = new;
			}
			return 0;
		}

		e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		hash_add(sync->fences, &e->node, f->context);
		e->fence = fence_get(f);
		return 0;
	}

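	/* fence belongs to this device, remember the latest fence per ring */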
	other = sync->sync_to[fence->ring->idx];
	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
		amdgpu_fence_later(fence, other));
	amdgpu_fence_unref(&other);

	if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
		other = sync->last_vm_update;
		sync->last_vm_update = amdgpu_fence_ref(
			amdgpu_fence_later(fence, other));
		amdgpu_fence_unref(&other);
	}

	return 0;
}

/**
 * amdgpu_sync_resv - sync to all fences of a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @owner: owner of the fences, used to filter out fences we don't
 *	   need to sync to
 *
 * Sync to the exclusive fence and all relevant shared fences.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	struct amdgpu_fence *fence;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		fence = f ? to_amdgpu_fence(f) : NULL;
		if (fence && fence->ring->adev == adev) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
			    (fence->owner != AMDGPU_FENCE_OWNER_MOVE) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence->owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fence from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence->owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}

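/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object with the fences to wait for
 *
 * Waits on each collected fence in turn, dropping the entries as it
 * goes, and returns the first error encountered.
 */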
int amdgpu_sync_wait(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = fence_wait(e->fence, false);
		if (r)
			return r;

		hash_del(&e->node);
		fence_put(e->fence);
		kfree(e);
	}
	return 0;
}

/**
 * amdgpu_sync_rings - sync ring to all registered fences
 *
 * @sync: sync object to use
 * @ring: ring that needs sync
 *
 * Ensure that all registered fences are signaled before letting
 * the ring continue. The caller must hold the ring lock.
 */
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned count = 0;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_fence *fence = sync->sync_to[i];
		struct amdgpu_semaphore *semaphore;
		struct amdgpu_ring *other = adev->rings[i];

		/* check if we really need to sync */
		if (!amdgpu_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!other->ready) {
			dev_err(adev->dev, "Syncing to a disabled ring!");
			return -EINVAL;
		}

		if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
			/* not enough room, wait manually */
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		r = amdgpu_semaphore_create(adev, &semaphore);
		if (r)
			return r;
		sync->semaphores[count++] = semaphore;

		/* allocate enough space for sync command */
		r = amdgpu_ring_alloc(other, 16);
		if (r)
			return r;

		/* emit the signal semaphore */
		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
			/* signaling wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		/* we assume caller has already allocated space on waiters ring */
		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
			/* waiting wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		amdgpu_ring_commit(other);
		amdgpu_fence_note_sync(fence, ring);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @adev: amdgpu_device pointer
 * @sync: sync object to use
 * @fence: fence to use for the free
 *
 * Free the sync object by freeing all semaphores in it.
 */
void amdgpu_sync_free(struct amdgpu_device *adev,
		      struct amdgpu_sync *sync,
		      struct fence *fence)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		fence_put(e->fence);
		kfree(e);
	}

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		amdgpu_fence_unref(&sync->sync_to[i]);

	amdgpu_fence_unref(&sync->last_vm_update);
}

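/*
 * Typical lifecycle (a sketch, not code from this file; "resv", "ring"
 * and "fence" stand in for whatever the caller has at hand):
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED);
 *	if (!r)
 *		r = amdgpu_sync_rings(&sync, ring);
 *	amdgpu_sync_free(adev, &sync, fence);
 */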