]> Git Repo - linux.git/blobdiff - drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drm/amdgpu: use kmemdup rather than duplicating its implementation
[linux.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_sync.c
index 7cb711fc1ee29490df558cc60cdc5a278d6ef135..4921de15b45158fe89af11d540eebcf998e2983c 100644 (file)
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
+/*
+ * One hash-table entry per fence context: tracks the latest foreign
+ * (non-amdgpu or other-device) fence that this sync object must wait on.
+ */
+struct amdgpu_sync_entry {
+       struct hlist_node       node;   /* link in amdgpu_sync.fences hash, keyed by fence context */
+       struct fence            *fence; /* held reference; dropped in wait/get_fence/free */
+};
+
 /**
  * amdgpu_sync_create - zero init sync object
  *
@@ -49,9 +54,39 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                sync->sync_to[i] = NULL;
 
+       hash_init(sync->fences);
        sync->last_vm_update = NULL;
 }
 
+/**
+ * amdgpu_sync_same_dev - check if a fence was emitted by the given device
+ *
+ * @adev: amdgpu device to compare against
+ * @f: fence to examine
+ *
+ * Returns true if @f is an amdgpu hardware fence or a scheduler fence
+ * belonging to a ring of @adev, false for any foreign fence.
+ */
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+{
+       struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+       struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+       if (a_fence)
+               return a_fence->ring->adev == adev;
+
+       if (s_fence) {
+               struct amdgpu_ring *ring;
+
+               /* scheduler fences only know their scheduler; map it back to the ring */
+               ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+               return ring->adev == adev;
+       }
+
+       return false;
+}
+
+/**
+ * amdgpu_sync_test_owner - check if a fence belongs to the given owner
+ *
+ * @f: fence to examine
+ * @owner: owner token to compare against (e.g. AMDGPU_FENCE_OWNER_VM)
+ *
+ * Returns true if @f is an amdgpu or scheduler fence with a matching
+ * owner, false for foreign fences or a non-matching owner.
+ */
+static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
+{
+       struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+       struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+       if (s_fence)
+               return s_fence->owner == owner;
+       if (a_fence)
+               return a_fence->owner == owner;
+       return false;
+}
+
 /**
  * amdgpu_sync_fence - remember to sync to this fence
  *
@@ -62,31 +97,69 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                      struct fence *f)
 {
+       struct amdgpu_sync_entry *e;
        struct amdgpu_fence *fence;
        struct amdgpu_fence *other;
+       struct fence *tmp, *later;
 
+       /* a NULL fence means nothing to wait on */
        if (!f)
                return 0;
 
+       /* Remember the newest VM page-table update fence from this device. */
+       if (amdgpu_sync_same_dev(adev, f) &&
+           amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) {
+               if (sync->last_vm_update) {
+                       tmp = sync->last_vm_update;
+                       /* VM updates are expected to come from a single context */
+                       BUG_ON(f->context != tmp->context);
+                       /*
+                        * Serial-number compare: @f is the later fence when the
+                        * unsigned seqno difference hasn't wrapped past INT_MAX.
+                        */
+                       later = (f->seqno - tmp->seqno <= INT_MAX) ? f : tmp;
+                       sync->last_vm_update = fence_get(later);
+                       fence_put(tmp);
+               } else
+                       sync->last_vm_update = fence_get(f);
+       }
+
        fence = to_amdgpu_fence(f);
-       if (!fence || fence->ring->adev != adev)
-               return fence_wait(f, true);
+       if (!fence || fence->ring->adev != adev) {
+               /*
+                * Foreign fence: keep only the latest fence per context in the
+                * hash, so we track at most one fence per fence timeline.
+                */
+               hash_for_each_possible(sync->fences, e, node, f->context) {
+                       struct fence *new;
+                       /* hash collision between different contexts */
+                       if (unlikely(e->fence->context != f->context))
+                               continue;
+                       new = fence_get(fence_later(e->fence, f));
+                       if (new) {
+                               fence_put(e->fence);
+                               e->fence = new;
+                       }
+                       return 0;
+               }
+
+               /* first fence seen for this context: add a new entry */
+               e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
+               if (!e)
+                       return -ENOMEM;
+
+               hash_add(sync->fences, &e->node, f->context);
+               e->fence = fence_get(f);
+               return 0;
+       }
 
+       /* local hardware fence: keep the latest one per ring */
        other = sync->sync_to[fence->ring->idx];
        sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
                amdgpu_fence_later(fence, other));
        amdgpu_fence_unref(&other);
 
-       if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
-               other = sync->last_vm_update;
-               sync->last_vm_update = amdgpu_fence_ref(
-                       amdgpu_fence_later(fence, other));
-               amdgpu_fence_unref(&other);
-       }
-
        return 0;
 }
 
+/**
+ * amdgpu_sync_get_owner - extract the owner of a fence
+ *
+ * @f: fence to examine
+ *
+ * Returns the owner token of an amdgpu or scheduler fence, or
+ * AMDGPU_FENCE_OWNER_UNDEFINED for foreign fence types.
+ */
+static void *amdgpu_sync_get_owner(struct fence *f)
+{
+       struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+       struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+       if (s_fence)
+               return s_fence->owner;
+       else if (a_fence)
+               return a_fence->owner;
+       return AMDGPU_FENCE_OWNER_UNDEFINED;
+}
+
 /**
  * amdgpu_sync_resv - use the semaphores to sync to a reservation object
  *
@@ -103,7 +176,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 {
        struct reservation_object_list *flist;
        struct fence *f;
-       struct amdgpu_fence *fence;
+       void *fence_owner;
        unsigned i;
        int r = 0;
 
@@ -121,22 +194,22 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
        for (i = 0; i < flist->shared_count; ++i) {
                f = rcu_dereference_protected(flist->shared[i],
                                              reservation_object_held(resv));
-               fence = f ? to_amdgpu_fence(f) : NULL;
-               if (fence && fence->ring->adev == adev) {
+               if (amdgpu_sync_same_dev(adev, f)) {
                        /* VM updates are only interesting
                         * for other VM updates and moves.
                         */
+                       fence_owner = amdgpu_sync_get_owner(f);
                        if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
-                           (fence->owner != AMDGPU_FENCE_OWNER_MOVE) &&
+                           (fence_owner != AMDGPU_FENCE_OWNER_MOVE) &&
                            ((owner == AMDGPU_FENCE_OWNER_VM) !=
-                            (fence->owner == AMDGPU_FENCE_OWNER_VM)))
+                            (fence_owner == AMDGPU_FENCE_OWNER_VM)))
                                continue;
 
                        /* Ignore fence from the same owner as
                         * long as it isn't undefined.
                         */
                        if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-                           fence->owner == owner)
+                           fence_owner == owner)
                                continue;
                }
 
@@ -147,6 +220,60 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
        return r;
 }
 
+/**
+ * amdgpu_sync_get_fence - pop the next unsignaled fence to wait on
+ *
+ * @sync: sync object to drain
+ *
+ * Removes entries from the hash one at a time, dropping already-signaled
+ * fences, and returns the first still-unsignaled fence found. The
+ * reference on the returned fence is transferred to the caller, who
+ * must fence_put() it. Returns NULL once the hash is empty of
+ * unsignaled fences.
+ */
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+{
+       struct amdgpu_sync_entry *e;
+       struct hlist_node *tmp;
+       struct fence *f;
+       int i;
+
+       hash_for_each_safe(sync->fences, i, tmp, e, node) {
+
+               f = e->fence;
+
+               hash_del(&e->node);
+               kfree(e);
+
+               /* entry's reference is handed to the caller */
+               if (!fence_is_signaled(f))
+                       return f;
+
+               /* already signaled: nothing to wait on, drop our reference */
+               fence_put(f);
+       }
+       return NULL;
+}
+
+/**
+ * amdgpu_sync_wait - block until all collected fences have signaled
+ *
+ * @sync: sync object with the fences to wait for
+ *
+ * First waits on every foreign fence in the hash, removing entries as
+ * they complete; on a wait error the current entry is left in place
+ * (cleaned up later by amdgpu_sync_free). When semaphores are disabled,
+ * also waits on the per-ring hardware fences — presumably because with
+ * semaphores enabled those are synced on the GPU instead (TODO confirm
+ * against amdgpu_sync_rings).
+ *
+ * Returns 0 on success or the error from fence_wait().
+ */
+int amdgpu_sync_wait(struct amdgpu_sync *sync)
+{
+       struct amdgpu_sync_entry *e;
+       struct hlist_node *tmp;
+       int i, r;
+
+       hash_for_each_safe(sync->fences, i, tmp, e, node) {
+               r = fence_wait(e->fence, false);
+               if (r)
+                       return r;
+
+               hash_del(&e->node);
+               fence_put(e->fence);
+               kfree(e);
+       }
+
+       if (amdgpu_enable_semaphores)
+               return 0;
+
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+               struct amdgpu_fence *fence = sync->sync_to[i];
+               if (!fence)
+                       continue;
+
+               r = fence_wait(&fence->base, false);
+               if (r)
+                       return r;
+       }
+
+       return 0;
+}
+
 /**
  * amdgpu_sync_rings - sync ring to all registered fences
  *
@@ -178,7 +305,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
                        return -EINVAL;
                }
 
-               if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
+               if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
+                   (count >= AMDGPU_NUM_SYNCS)) {
                        /* not enough room, wait manually */
                        r = fence_wait(&fence->base, false);
                        if (r)
@@ -234,15 +362,23 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
  */
 void amdgpu_sync_free(struct amdgpu_device *adev,
                      struct amdgpu_sync *sync,
-                     struct amdgpu_fence *fence)
+                     struct fence *fence)
 {
+       struct amdgpu_sync_entry *e;
+       struct hlist_node *tmp;
        unsigned i;
 
+       /* release any foreign fences still tracked in the hash */
+       hash_for_each_safe(sync->fences, i, tmp, e, node) {
+               hash_del(&e->node);
+               fence_put(e->fence);
+               kfree(e);
+       }
+
        for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
                amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                amdgpu_fence_unref(&sync->sync_to[i]);
 
-       amdgpu_fence_unref(&sync->last_vm_update);
+       /* last_vm_update is now a plain struct fence reference */
+       fence_put(sync->last_vm_update);
 }
This page took 0.042282 seconds and 4 git commands to generate.