/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
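
/*
 * Scheduler dependency callback: hand the scheduler the next fence this
 * job still has to wait on, taken from the job's sync object.
 */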
static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	return amdgpu_sync_get_fence(&job->ibs->sync);
}
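
/*
 * Scheduler run callback: submit the job's IBs to the ring and return the
 * hardware fence of the last IB so the scheduler can track completion.
 */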
static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
{
	struct amdgpu_fence *fence = NULL;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);
	mutex_lock(&job->job_lock);
	trace_amdgpu_sched_run_job(job);
	r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs,
			       job->base.owner);
	if (r) {
		DRM_ERROR("Error scheduling IBs (%d)\n", r);
		goto err;
	}

	/* The fence of the last IB marks completion of the whole job */
	fence = job->ibs[job->num_ibs - 1].fence;
	fence_get(&fence->base);

err:
	if (job->free_job)
		job->free_job(job);

	mutex_unlock(&job->job_lock);
	return fence ? &fence->base : NULL;
}
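
/* Backend callbacks the GPU scheduler invokes for amdgpu jobs */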
struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_sched_dependency,
	.run_job = amdgpu_sched_run_job,
};
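
/*
 * Submit IBs on behalf of kernel code: when the scheduler is enabled, wrap
 * them in an amdgpu_job and push it to the ring's kernel entity; otherwise
 * schedule them on the ring directly. The fence to wait on is returned
 * through @f.
 */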
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_job *),
					 void *owner,
					 struct fence **f)
{
	int r = 0;
	if (amdgpu_enable_scheduler) {
		struct amdgpu_job *job =
			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
		if (!job)
			return -ENOMEM;
		job->base.sched = &ring->sched;
		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
		job->adev = adev;
		job->ibs = ibs;
		job->num_ibs = num_ibs;
		job->base.owner = owner;
		mutex_init(&job->job_lock);
		job->free_job = free_job;
		mutex_lock(&job->job_lock);
		r = amd_sched_entity_push_job(&job->base);
		if (r) {
			mutex_unlock(&job->job_lock);
			kfree(job);
			return r;
		}
		*f = fence_get(&job->base.s_fence->base);
		mutex_unlock(&job->job_lock);
	} else {
		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
		if (r)
			return r;
		*f = fence_get(&ibs[num_ibs - 1].fence->base);
	}

	return 0;
}