linux.git: drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <[email protected]>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

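/*
 * One entry in the sync->fences hash table. Fences are hashed by their
 * context, so at most one fence (the latest one) is kept per context.
 */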
struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct fence		*fence;
};

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		sync->semaphores[i] = NULL;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		sync->sync_to[i] = NULL;

	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

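/*
 * Usage sketch (illustrative only): a caller typically pairs
 * amdgpu_sync_create() with amdgpu_sync_free() and collects its dependencies
 * in between. The variables "dependency_fence" and "job_fence" below are
 * hypothetical.
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_fence(adev, &sync, dependency_fence);
 *	if (!r)
 *		r = amdgpu_sync_rings(&sync, ring);
 *	amdgpu_sync_free(adev, &sync, job_fence);
 */
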
/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu_device pointer
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_sync_entry *e;
	struct amdgpu_fence *fence;
	struct amdgpu_fence *other;

	if (!f)
		return 0;

	fence = to_amdgpu_fence(f);
	if (!fence || fence->ring->adev != adev) {
		hash_for_each_possible(sync->fences, e, node, f->context) {
			struct fence *new;
			if (unlikely(e->fence->context != f->context))
				continue;
			new = fence_get(fence_later(e->fence, f));
			if (new) {
				fence_put(e->fence);
				e->fence = new;
			}
			return 0;
		}

		e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		hash_add(sync->fences, &e->node, f->context);
		e->fence = fence_get(f);
		return 0;
	}

	other = sync->sync_to[fence->ring->idx];
	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
		amdgpu_fence_later(fence, other));
	amdgpu_fence_unref(&other);

	if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
		other = sync->last_vm_update;
		sync->last_vm_update = amdgpu_fence_ref(
			amdgpu_fence_later(fence, other));
		amdgpu_fence_unref(&other);
	}

	return 0;
}

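/*
 * Illustrative example (hypothetical caller code): fences that don't belong
 * to this device end up in the per-context hash table above, while fences
 * from this device are tracked per ring in sync->sync_to[]. Either way the
 * caller just adds the dependency:
 *
 *	r = amdgpu_sync_fence(adev, &sync, foreign_fence);
 *	if (r)
 *		goto error;
 */
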
/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu_device pointer
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @owner: owner doing the submission, used to decide which fences can be skipped
 *
 * Sync to the exclusive fence and to all shared fences that actually need
 * to be waited for.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	struct amdgpu_fence *fence;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		fence = f ? to_amdgpu_fence(f) : NULL;
		if (fence && fence->ring->adev == adev) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
			    (fence->owner != AMDGPU_FENCE_OWNER_MOVE) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence->owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence->owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}

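/*
 * Illustrative example (hypothetical caller code): before submitting a
 * command stream that touches a buffer object, pull the dependencies out of
 * the buffer's reservation object. "bo" is a hypothetical struct amdgpu_bo.
 *
 *	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv,
 *			     AMDGPU_FENCE_OWNER_UNDEFINED);
 */
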
/**
 * amdgpu_sync_wait - wait CPU side for all fences in the sync object
 *
 * @sync: sync object to wait on
 *
 * Wait for all collected fences to signal and drop the entries again.
 */
int amdgpu_sync_wait(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = fence_wait(e->fence, false);
		if (r)
			return r;

		hash_del(&e->node);
		fence_put(e->fence);
		kfree(e);
	}
	return 0;
}

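/*
 * Illustrative example (hypothetical caller code): blocking on the collected
 * per-context fences before continuing with the submission.
 *
 *	r = amdgpu_sync_wait(&sync);
 *	if (r)
 *		return r;
 */
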
/**
 * amdgpu_sync_rings - sync ring to all registered fences
 *
 * @sync: sync object to use
 * @ring: ring that needs sync
 *
 * Ensure that all registered fences are signaled before letting
 * the ring continue. The caller must hold the ring lock.
 */
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned count = 0;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_fence *fence = sync->sync_to[i];
		struct amdgpu_semaphore *semaphore;
		struct amdgpu_ring *other = adev->rings[i];

		/* check if we really need to sync */
		if (!amdgpu_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!other->ready) {
			dev_err(adev->dev, "Syncing to a disabled ring!");
			return -EINVAL;
		}

		if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
			/* not enough room, wait manually */
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}
		r = amdgpu_semaphore_create(adev, &semaphore);
		if (r)
			return r;

		sync->semaphores[count++] = semaphore;

		/* allocate enough space for sync command */
		r = amdgpu_ring_alloc(other, 16);
		if (r)
			return r;

		/* emit the signal semaphore */
		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
			/* signaling wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		/* we assume the caller has already allocated space on the waiter's ring */
		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
			/* waiting wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		amdgpu_ring_commit(other);
		amdgpu_fence_note_sync(fence, ring);
	}

	return 0;
}

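/*
 * Note on the flow above: a semaphore is only emitted when the GPU scheduler
 * is disabled and a free slot is left in sync->semaphores[]; otherwise the
 * code falls back to a CPU-side fence_wait(). Hypothetical caller sketch
 * (the ring lock is assumed to be held, see the kernel-doc above):
 *
 *	r = amdgpu_sync_rings(&sync, ring);
 *	if (r)
 *		return r;
 */
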
/**
 * amdgpu_sync_free - free the sync object
 *
 * @adev: amdgpu_device pointer
 * @sync: sync object to use
 * @fence: fence to use for the free
 *
 * Free the sync object by dropping all fence references and freeing
 * the semaphores in it.
 */
void amdgpu_sync_free(struct amdgpu_device *adev,
		      struct amdgpu_sync *sync,
		      struct fence *fence)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		fence_put(e->fence);
		kfree(e);
	}

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		amdgpu_fence_unref(&sync->sync_to[i]);

	amdgpu_fence_unref(&sync->last_vm_update);
}