Git Repo - linux.git/blobdiff - drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drm/amdgpu: use kmemdup rather than duplicating its implementation
[linux.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_sync.c
index 4fffb253933184a23e49a46a8ce5dbc74a519fb0..4921de15b45158fe89af11d540eebcf998e2983c 100644 (file)
@@ -65,8 +65,14 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
 
        if (a_fence)
                return a_fence->ring->adev == adev;
-       if (s_fence)
-               return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+
+       if (s_fence) {
+               struct amdgpu_ring *ring;
+
+               ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+               return ring->adev == adev;
+       }
+
        return false;
 }
 
@@ -142,6 +148,18 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
        return 0;
 }
 
+/* Return the owner token recorded in fence @f.
+ *
+ * A fence may be a scheduler fence, a hardware amdgpu fence, or a
+ * foreign fence; the scheduler fence's owner takes precedence when
+ * both casts succeed.  For a foreign fence (neither cast matches)
+ * AMDGPU_FENCE_OWNER_UNDEFINED is returned.
+ * NOTE(review): to_amdgpu_fence()/to_amd_sched_fence() presumably
+ * return NULL for non-matching fence types — confirm against their
+ * definitions.
+ */
+static void *amdgpu_sync_get_owner(struct fence *f)
+{
+       struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+       struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+       if (s_fence)
+               return s_fence->owner;
+       else if (a_fence)
+               return a_fence->owner;
+       return AMDGPU_FENCE_OWNER_UNDEFINED;
+}
+
 /**
  * amdgpu_sync_resv - use the semaphores to sync to a reservation object
  *
@@ -158,7 +176,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 {
        struct reservation_object_list *flist;
        struct fence *f;
-       struct amdgpu_fence *fence;
+       void *fence_owner;
        unsigned i;
        int r = 0;
 
@@ -176,22 +194,22 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
        for (i = 0; i < flist->shared_count; ++i) {
                f = rcu_dereference_protected(flist->shared[i],
                                              reservation_object_held(resv));
-               fence = f ? to_amdgpu_fence(f) : NULL;
-               if (fence && fence->ring->adev == adev) {
+               if (amdgpu_sync_same_dev(adev, f)) {
                        /* VM updates are only interesting
                         * for other VM updates and moves.
                         */
+                       fence_owner = amdgpu_sync_get_owner(f);
                        if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
-                           (fence->owner != AMDGPU_FENCE_OWNER_MOVE) &&
+                           (fence_owner != AMDGPU_FENCE_OWNER_MOVE) &&
                            ((owner == AMDGPU_FENCE_OWNER_VM) !=
-                            (fence->owner == AMDGPU_FENCE_OWNER_VM)))
+                            (fence_owner == AMDGPU_FENCE_OWNER_VM)))
                                continue;
 
                        /* Ignore fence from the same owner as
                         * long as it isn't undefined.
                         */
                        if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-                           fence->owner == owner)
+                           fence_owner == owner)
                                continue;
                }
 
@@ -202,6 +220,28 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
        return r;
 }
 
+/* Drain the sync object's fence hash and return the first fence that
+ * has not signaled yet, or NULL once every entry has been consumed.
+ *
+ * Each visited entry is removed from the hash and freed.  Signaled
+ * fences are dropped with fence_put(); the returned fence is NOT put
+ * here, so its reference is handed over to the caller, who must
+ * fence_put() it when done.
+ * NOTE(review): assumes the caller serializes access to @sync — no
+ * locking is visible here; confirm against the callers.
+ */
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+{
+       struct amdgpu_sync_entry *e;
+       struct hlist_node *tmp;
+       struct fence *f;
+       int i;
+
+       hash_for_each_safe(sync->fences, i, tmp, e, node) {
+
+               f = e->fence;
+
+               hash_del(&e->node);
+               kfree(e);
+
+               /* Hand the reference for an unsignaled fence to the caller. */
+               if (!fence_is_signaled(f))
+                       return f;
+
+               fence_put(f);
+       }
+       return NULL;
+}
+
 int amdgpu_sync_wait(struct amdgpu_sync *sync)
 {
        struct amdgpu_sync_entry *e;
@@ -217,6 +257,20 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
                fence_put(e->fence);
                kfree(e);
        }
+
+       if (amdgpu_enable_semaphores)
+               return 0;
+
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+               struct amdgpu_fence *fence = sync->sync_to[i];
+               if (!fence)
+                       continue;
+
+               r = fence_wait(&fence->base, false);
+               if (r)
+                       return r;
+       }
+
        return 0;
 }
 
@@ -251,7 +305,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
                        return -EINVAL;
                }
 
-               if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
+               if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
+                   (count >= AMDGPU_NUM_SYNCS)) {
                        /* not enough room, wait manually */
                        r = fence_wait(&fence->base, false);
                        if (r)
This page took 0.038057 seconds and 4 git commands to generate.