Merge tag 'fbdev-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux
[linux.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_sched.c
index d682fabca958435a147d66d119a559e9d276a537..de98fbd2971eded37ecb896921255d38787ce7b5 100644
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
-                                   struct amd_context_entity *c_entity,
-                                   void *job)
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
 {
-       int r = 0;
-       struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;
-       if (sched_job->prepare_job)
-               r = sched_job->prepare_job(sched_job);
-       if (r) {
-               DRM_ERROR("Prepare job error\n");
-               schedule_work(&sched_job->job_work);
-       }
-       return r;
+       struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
+       return amdgpu_sync_get_fence(&sched_job->ibs->sync);
 }
 
-static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
-                                struct amd_context_entity *c_entity,
-                                void *job)
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
 {
-       int r = 0;
-       struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;
+       struct amdgpu_job *sched_job;
+       struct amdgpu_fence *fence;
+       int r;
 
+       if (!job) {
+               DRM_ERROR("job is null\n");
+               return NULL;
+       }
+       sched_job = (struct amdgpu_job *)job;
        mutex_lock(&sched_job->job_lock);
        r = amdgpu_ib_schedule(sched_job->adev,
                               sched_job->num_ibs,
                               sched_job->ibs,
-                              sched_job->filp);
+                              sched_job->base.owner);
        if (r)
                goto err;
-       if (sched_job->run_job) {
-               r = sched_job->run_job(sched_job);
-               if (r)
-                       goto err;
-       }
-       atomic64_set(&c_entity->last_emitted_v_seq,
-                    sched_job->ibs[sched_job->num_ibs - 1].sequence);
-       wake_up_all(&c_entity->wait_emit);
+       fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
+
+       if (sched_job->free_job)
+               sched_job->free_job(sched_job);
 
        mutex_unlock(&sched_job->job_lock);
-       return;
+       return &fence->base;
+
 err:
        DRM_ERROR("Run job error\n");
        mutex_unlock(&sched_job->job_lock);
-       schedule_work(&sched_job->job_work);
+       job->sched->ops->process_job(job);
+       return NULL;
 }
 
-static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
+static void amdgpu_sched_process_job(struct amd_sched_job *job)
 {
-       struct amdgpu_cs_parser *sched_job = NULL;
-       struct amdgpu_fence *fence = NULL;
-       struct amdgpu_ring *ring = NULL;
-       struct amdgpu_device *adev = NULL;
-       struct amd_context_entity *c_entity = NULL;
+       struct amdgpu_job *sched_job;
 
-       if (!job)
-               return;
-       sched_job = (struct amdgpu_cs_parser *)job;
-       fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
-       if (!fence)
+       if (!job) {
+               DRM_ERROR("job is null\n");
                return;
-       ring = fence->ring;
-       adev = ring->adev;
-
-       if (sched_job->ctx) {
-               c_entity = &sched_job->ctx->rings[ring->idx].c_entity;
-               atomic64_set(&c_entity->last_signaled_v_seq,
-                            sched_job->ibs[sched_job->num_ibs - 1].sequence);
        }
-
-       /* wake up users waiting for time stamp */
-       wake_up_all(&c_entity->wait_queue);
-
-       schedule_work(&sched_job->job_work);
+       sched_job = (struct amdgpu_job *)job;
+       /* after processing job, free memory */
+       fence_put(&sched_job->base.s_fence->base);
+       kfree(sched_job);
 }
 
 struct amd_sched_backend_ops amdgpu_sched_ops = {
-       .prepare_job = amdgpu_sched_prepare_job,
+       .dependency = amdgpu_sched_dependency,
        .run_job = amdgpu_sched_run_job,
        .process_job = amdgpu_sched_process_job
 };
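
For orientation, the reworked amd_sched_backend_ops contract can be pictured with the simplified loop below. This is an illustrative sketch only, not the in-tree scheduler core (which completes jobs through fence callbacks rather than blocking waits); demo_run_one_job is a hypothetical name, and only the three callbacks come from the hunk above.

static void demo_run_one_job(struct amd_gpu_scheduler *sched,
			     struct amd_sched_job *job)
{
	struct fence *dep, *hw_fence;

	/* Drain the job's dependencies before touching the ring;
	 * the return value of fence_wait() is ignored in this sketch. */
	while ((dep = sched->ops->dependency(job)) != NULL) {
		fence_wait(dep, false);
		fence_put(dep);
	}

	/* run_job now returns the hardware fence for the submission. */
	hw_fence = sched->ops->run_job(job);
	if (!hw_fence)
		return;	/* run_job failed and already invoked process_job */

	/* Once the fence signals, process_job frees the amdgpu_job. */
	fence_wait(hw_fence, false);
	fence_put(hw_fence);
	sched->ops->process_job(job);
}
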
@@ -112,34 +90,39 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                         struct amdgpu_ring *ring,
                                         struct amdgpu_ib *ibs,
                                         unsigned num_ibs,
-                                        int (*free_job)(struct amdgpu_cs_parser *),
-                                        void *owner)
+                                        int (*free_job)(struct amdgpu_job *),
+                                        void *owner,
+                                        struct fence **f)
 {
        int r = 0;
        if (amdgpu_enable_scheduler) {
-               uint64_t v_seq;
-               struct amdgpu_cs_parser *sched_job =
-                       amdgpu_cs_parser_create(adev,
-                                               owner,
-                                               adev->kernel_ctx,
-                                               ibs, 1);
-               if(!sched_job) {
+               struct amdgpu_job *job =
+                       kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+               if (!job)
                        return -ENOMEM;
+               job->base.sched = ring->scheduler;
+               job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+               job->adev = adev;
+               job->ibs = ibs;
+               job->num_ibs = num_ibs;
+               job->base.owner = owner;
+               mutex_init(&job->job_lock);
+               job->free_job = free_job;
+               mutex_lock(&job->job_lock);
+               r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+               if (r) {
+                       mutex_unlock(&job->job_lock);
+                       kfree(job);
+                       return r;
                }
-               sched_job->free_job = free_job;
-               v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-               ibs[num_ibs - 1].sequence = v_seq;
-               amd_sched_push_job(ring->scheduler,
-                                  &adev->kernel_ctx->rings[ring->idx].c_entity,
-                                  sched_job);
-               r = amd_sched_wait_emit(
-                       &adev->kernel_ctx->rings[ring->idx].c_entity,
-                       v_seq,
-                       false,
-                       -1);
+               *f = fence_get(&job->base.s_fence->base);
+               mutex_unlock(&job->job_lock);
+       } else {
+               r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
                if (r)
-                       WARN(true, "emit timeout\n");
-       } else
-               r = amdgpu_ib_schedule(adev, 1, ibs, owner);
-       return r;
+                       return r;
+               *f = fence_get(&ibs[num_ibs - 1].fence->base);
+       }
+
+       return 0;
 }
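
With the helper returning the submission fence through the new f out-parameter, callers can wait for completion the same way whether or not amdgpu_enable_scheduler is set. A minimal caller sketch under that assumption (demo_submit_and_wait and the NULL free_job are illustrative, not taken from the tree):

static int demo_submit_and_wait(struct amdgpu_device *adev,
				struct amdgpu_ring *ring,
				struct amdgpu_ib *ib)
{
	struct fence *f = NULL;
	int r;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 NULL /* free_job */,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		return r;

	/* The helper hands back a fence reference; drop it after waiting. */
	r = fence_wait(f, false);
	fence_put(f);
	return r;
}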