diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 0cfc68db575b1ec8c220060154e2485ff8cecf78..2bd56760c7441fdc720d3a469fef424ae39741ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,7 +28,7 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
-static void amdgpu_job_timedout(struct amd_sched_job *s_job)
+static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
@@ -37,10 +37,7 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
                  atomic_read(&job->ring->fence_drv.last_seq),
                  job->ring->fence_drv.sync_seq);
 
-       if (amdgpu_sriov_vf(job->adev))
-               amdgpu_sriov_gpu_reset(job->adev, job);
-       else
-               amdgpu_gpu_reset(job->adev);
+       amdgpu_device_gpu_recover(job->adev, job, false);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -63,7 +60,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
        (*job)->num_ibs = num_ibs;
 
        amdgpu_sync_create(&(*job)->sync);
-       amdgpu_sync_create(&(*job)->dep_sync);
        amdgpu_sync_create(&(*job)->sched_sync);
        (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
 
@@ -100,14 +96,13 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
                amdgpu_ib_free(job->adev, &job->ibs[i], f);
 }
 
-static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 {
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-       amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
+       amdgpu_ring_priority_put(job->ring, s_job->s_priority);
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
-       amdgpu_sync_free(&job->dep_sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
 }
@@ -118,13 +113,12 @@ void amdgpu_job_free(struct amdgpu_job *job)
 
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
-       amdgpu_sync_free(&job->dep_sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                     struct amd_sched_entity *entity, void *owner,
+                     struct drm_sched_entity *entity, void *owner,
                      struct dma_fence **f)
 {
        int r;
@@ -133,7 +127,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
        if (!f)
                return -EINVAL;
 
-       r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
+       r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
        if (r)
                return r;
 
@@ -141,46 +135,47 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
        job->fence_ctx = entity->fence_context;
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
-       amdgpu_ring_priority_get(job->ring,
-                                amd_sched_get_job_priority(&job->base));
-       amd_sched_entity_push_job(&job->base);
+       amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+       drm_sched_entity_push_job(&job->base, entity);
 
        return 0;
 }
 
-static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+                                              struct drm_sched_entity *s_entity)
 {
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
-
-       struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
+       bool explicit = false;
        int r;
-
-       if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
-               r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
-               if (r)
-                       DRM_ERROR("Error adding fence to sync (%d)\n", r);
+       struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
+
+       if (fence && explicit) {
+               if (drm_sched_dependency_optimized(fence, s_entity)) {
+                       r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+                       if (r)
+                               DRM_ERROR("Error adding fence to sync (%d)\n", r);
+               }
        }
-       if (!fence)
-               fence = amdgpu_sync_get_fence(&job->sync);
-       while (fence == NULL && vm && !job->vm_id) {
+
+       while (fence == NULL && vm && !job->vmid) {
                struct amdgpu_ring *ring = job->ring;
 
-               r = amdgpu_vm_grab_id(vm, ring, &job->sync,
-                                     &job->base.s_fence->finished,
-                                     job);
+               r = amdgpu_vmid_grab(vm, ring, &job->sync,
+                                    &job->base.s_fence->finished,
+                                    job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);
 
-               fence = amdgpu_sync_get_fence(&job->sync);
+               fence = amdgpu_sync_get_fence(&job->sync, NULL);
        }
 
        return fence;
 }
 
-static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
-       struct dma_fence *fence = NULL;
+       struct dma_fence *fence = NULL, *finished;
        struct amdgpu_device *adev;
        struct amdgpu_job *job;
        int r;
@@ -190,15 +185,18 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
                return NULL;
        }
        job = to_amdgpu_job(sched_job);
+       finished = &job->base.s_fence->finished;
        adev = job->adev;
 
        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
        trace_amdgpu_sched_run_job(job);
-       /* skip ib schedule when vram is lost */
-       if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
-               dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
-               DRM_ERROR("Skip scheduling IBs!\n");
+
+       if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+               dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
+
+       if (finished->error < 0) {
+               DRM_INFO("Skip scheduling IBs!\n");
        } else {
                r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
                                       &fence);
@@ -213,7 +211,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
        return fence;
 }
 
-const struct amd_sched_backend_ops amdgpu_sched_ops = {
+const struct drm_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,