Merge tag 'fbdev-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 787b93db67968fc99d480c3e934a86a6c9d97e92..de98fbd2971eded37ecb896921255d38787ce7b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
-                                   struct amd_sched_entity *entity,
-                                   void *job)
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
 {
-       int r = 0;
-       struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;
-       if (sched_job->prepare_job) {
-               r = sched_job->prepare_job(sched_job);
-               if (r) {
-                       DRM_ERROR("Prepare job error\n");
-                       schedule_work(&sched_job->job_work);
-               }
-       }
-       return r;
-}
-
-static void amdgpu_fence_sched_cb(struct fence *f, struct fence_cb *cb)
-{
-       struct amd_sched_job *sched_job =
-               container_of(cb, struct amd_sched_job, cb);
-       amd_sched_process_job(sched_job);
+       struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
+       return amdgpu_sync_get_fence(&sched_job->ibs->sync);
 }
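
The new ->dependency callback replaces the old prepare_job hook and the per-fence callback plumbing: rather than arming a callback on each fence, the backend simply hands the scheduler core the next fence the job still has to wait on. A minimal sketch of how a core could drain this callback (illustrative only, not the in-tree gpu_scheduler code; fence_is_signaled() and fence_put() are the real linux/fence.h APIs):

static struct fence *sched_next_blocker(struct amd_gpu_scheduler *sched,
					struct amd_sched_job *job)
{
	struct fence *dep;

	/* keep asking the backend until no unsignaled dependency remains */
	while ((dep = sched->ops->dependency(job)) != NULL) {
		if (!fence_is_signaled(dep))
			return dep;	/* caller must wait on this fence first */
		fence_put(dep);		/* already signaled, drop the reference */
	}
	return NULL;			/* job has no blockers left and may run */
}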
 
-static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
-                                struct amd_sched_entity *entity,
-                                struct amd_sched_job *job)
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
 {
-       int r = 0;
-       struct amdgpu_cs_parser *sched_job;
+       struct amdgpu_job *sched_job;
        struct amdgpu_fence *fence;
+       int r;
 
-       if (!job || !job->job) {
+       if (!job) {
                DRM_ERROR("job is null\n");
-               return;
+               return NULL;
        }
-       sched_job = (struct amdgpu_cs_parser *)job->job;
+       sched_job = (struct amdgpu_job *)job;
        mutex_lock(&sched_job->job_lock);
        r = amdgpu_ib_schedule(sched_job->adev,
                               sched_job->num_ibs,
                               sched_job->ibs,
-                              sched_job->filp);
+                              sched_job->base.owner);
        if (r)
                goto err;
-       fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
-       if (fence_add_callback(&fence->base,
-                              &job->cb, amdgpu_fence_sched_cb)) {
-               DRM_ERROR("fence add callback failed\n");
-               goto err;
-       }
+       fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
 
-       if (sched_job->run_job) {
-               r = sched_job->run_job(sched_job);
-               if (r)
-                       goto err;
-       }
-
-       amd_sched_emit(entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);
+       if (sched_job->free_job)
+               sched_job->free_job(sched_job);
 
        mutex_unlock(&sched_job->job_lock);
-       return;
+       return &fence->base;
+
 err:
        DRM_ERROR("Run job error\n");
        mutex_unlock(&sched_job->job_lock);
-       schedule_work(&sched_job->job_work);
+       job->sched->ops->process_job(job);
+       return NULL;
 }
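
Both here and in process_job below, the struct amd_sched_job * argument is cast straight to struct amdgpu_job *, which is only safe if the scheduler job is embedded at offset zero. A hedged sketch of the container_of() form, which encodes that embedding explicitly and keeps working even if the base member moves:

/* assumption: struct amdgpu_job { struct amd_sched_job base; ... }; */
static inline struct amdgpu_job *to_amdgpu_job(struct amd_sched_job *job)
{
	return container_of(job, struct amdgpu_job, base);
}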
 
-static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
+static void amdgpu_sched_process_job(struct amd_sched_job *job)
 {
-       struct amdgpu_cs_parser *sched_job = NULL;
-       struct amdgpu_fence *fence = NULL;
-       struct amdgpu_ring *ring = NULL;
-       struct amdgpu_device *adev = NULL;
+       struct amdgpu_job *sched_job;
 
-       if (!job)
-               return;
-       sched_job = (struct amdgpu_cs_parser *)job;
-       fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
-       if (!fence)
+       if (!job) {
+               DRM_ERROR("job is null\n");
                return;
-       ring = fence->ring;
-       adev = ring->adev;
-
-       schedule_work(&sched_job->job_work);
+       }
+       sched_job = (struct amdgpu_job *)job;
+       /* after processing job, free memory */
+       fence_put(&sched_job->base.s_fence->base);
+       kfree(sched_job);
 }
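
process_job is now the end of the job's lifetime: it drops the scheduler-fence reference and frees the job itself. A sketch of the reference flow this completes; that the s_fence is created with its initial reference when the job is pushed is an inference from the helper below, not something visible in this hunk:

/*
 * push:    job->base.s_fence created with one reference
 *          (inferred to happen inside amd_sched_entity_push_job())
 * submit:  *f = fence_get(&job->base.s_fence->base);   caller's reference
 * process: fence_put(&sched_job->base.s_fence->base);  drops the push ref
 *          kfree(sched_job);                           the job itself
 */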
 
 struct amd_sched_backend_ops amdgpu_sched_ops = {
-       .prepare_job = amdgpu_sched_prepare_job,
+       .dependency = amdgpu_sched_dependency,
        .run_job = amdgpu_sched_run_job,
        .process_job = amdgpu_sched_process_job
 };
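
Taken together, the three callbacks form a much narrower contract with the scheduler core than the old prepare/run/process trio. A restatement of that contract as implied by the implementations above (the struct name and comments are illustrative; the real definition lives in the scheduler headers):

/* illustrative restatement, not the in-tree definition */
struct amd_sched_backend_ops_sketch {
	/* next fence the job must wait on, or NULL once it may run */
	struct fence *(*dependency)(struct amd_sched_job *job);
	/* submit to the hardware ring; HW fence on success, NULL on error */
	struct fence *(*run_job)(struct amd_sched_job *job);
	/* completion/teardown: release the job's resources */
	void (*process_job)(struct amd_sched_job *job);
};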
@@ -122,36 +90,39 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                         struct amdgpu_ring *ring,
                                         struct amdgpu_ib *ibs,
                                         unsigned num_ibs,
-                                        int (*free_job)(struct amdgpu_cs_parser *),
+                                        int (*free_job)(struct amdgpu_job *),
                                         void *owner,
                                         struct fence **f)
 {
        int r = 0;
        if (amdgpu_enable_scheduler) {
-               uint64_t v_seq;
-               struct amdgpu_cs_parser *sched_job =
-                       amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
-                                               ibs, 1);
-               if(!sched_job) {
+               struct amdgpu_job *job =
+                       kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+               if (!job)
                        return -ENOMEM;
+               job->base.sched = ring->scheduler;
+               job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+               job->adev = adev;
+               job->ibs = ibs;
+               job->num_ibs = num_ibs;
+               job->base.owner = owner;
+               mutex_init(&job->job_lock);
+               job->free_job = free_job;
+               mutex_lock(&job->job_lock);
+               r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+               if (r) {
+                       mutex_unlock(&job->job_lock);
+                       kfree(job);
+                       return r;
                }
-               sched_job->free_job = free_job;
-               v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
-               ibs[num_ibs - 1].sequence = v_seq;
-               amd_sched_push_job(ring->scheduler,
-                                  &adev->kernel_ctx.rings[ring->idx].entity,
-                                  sched_job);
-               r = amd_sched_wait_emit(
-                       &adev->kernel_ctx.rings[ring->idx].entity,
-                       v_seq,
-                       false,
-                       -1);
+               *f = fence_get(&job->base.s_fence->base);
+               mutex_unlock(&job->job_lock);
+       } else {
+               r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
                if (r)
-                       WARN(true, "emit timeout\n");
-       } else
-               r = amdgpu_ib_schedule(adev, 1, ibs, owner);
-       if (r)
-               return r;
-       *f = &ibs[num_ibs - 1].fence->base;
+                       return r;
+               *f = fence_get(&ibs[num_ibs - 1].fence->base);
+       }
+
        return 0;
 }
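
With both branches now returning a referenced fence through *f, every caller owns a reference it must drop. A hypothetical caller for illustration (my_free_job and submit_and_wait are made-up names, not in-tree code; fence_wait() and fence_put() are the real linux/fence.h APIs):

/* illustrative: release whatever the submitter allocated for its IBs */
static int my_free_job(struct amdgpu_job *job)
{
	kfree(job->ibs);	/* assumes the IB array was kmalloc'ed */
	return 0;
}

static int submit_and_wait(struct amdgpu_device *adev, struct amdgpu_ring *ring,
			   struct amdgpu_ib *ibs, unsigned num_ibs, void *owner)
{
	struct fence *f;
	int r;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ibs, num_ibs,
						 my_free_job, owner, &f);
	if (r)
		return r;
	r = fence_wait(f, false);	/* block until the job completes */
	fence_put(f);			/* drop the reference the helper took */
	return r;
}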