/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
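
/*
 * Glue between amdgpu and the shared amd_gpu_scheduler backend. The
 * scheduler hands jobs back as opaque pointers; each callback below casts
 * them to the amdgpu_cs_parser that was originally queued.
 */

/*
 * Run the job's optional prepare callback before submission. On failure
 * the job is punted to its work handler for cleanup instead of being run.
 */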
static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
				    struct amd_context_entity *c_entity,
				    void *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;

	if (sched_job->prepare_job) {
		r = sched_job->prepare_job(sched_job);
		if (r) {
			DRM_ERROR("Prepare job error\n");
			schedule_work(&sched_job->job_work);
		}
	}
	return r;
}
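
/* Fence callback: the hardware fence signaled, notify the scheduler. */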
static void amdgpu_fence_sched_cb(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *sched_job =
		container_of(cb, struct amd_sched_job, cb);
	amd_sched_process_job(sched_job);
}
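
/*
 * Submit a queued job's IBs to the ring and chain completion handling
 * onto the fence of the last IB. All failures fall through to the err
 * label, which logs and defers cleanup to the job's work handler.
 */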
static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
				 struct amd_context_entity *c_entity,
				 struct amd_sched_job *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job;
	struct amdgpu_fence *fence;

	if (!job || !job->job) {
		DRM_ERROR("job is null\n");
		return;
	}
	sched_job = (struct amdgpu_cs_parser *)job->job;
	mutex_lock(&sched_job->job_lock);
	r = amdgpu_ib_schedule(sched_job->adev,
			       sched_job->num_ibs,
			       sched_job->ibs,
			       sched_job->filp);
	if (r)
		goto err;
	fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
	if (fence_add_callback(&fence->base,
			       &job->cb, amdgpu_fence_sched_cb)) {
		DRM_ERROR("fence add callback failed\n");
		goto err;
	}

	if (sched_job->run_job) {
		r = sched_job->run_job(sched_job);
		if (r)
			goto err;
	}

	amd_sched_emit(c_entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);

	mutex_unlock(&sched_job->job_lock);
	return;
err:
	DRM_ERROR("Run job error\n");
	mutex_unlock(&sched_job->job_lock);
	schedule_work(&sched_job->job_work);
}
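
/*
 * Scheduler completion hook: the job's fence has signaled, so hand the
 * remaining teardown to the job_work handler.
 */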
static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
{
	struct amdgpu_cs_parser *sched_job = NULL;
	struct amdgpu_fence *fence = NULL;
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_device *adev = NULL;

	if (!job)
		return;
	sched_job = (struct amdgpu_cs_parser *)job;
	fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
	if (!fence)
		return;
	ring = fence->ring;
	adev = ring->adev;

	schedule_work(&sched_job->job_work);
}
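
/* Backend callbacks registered with the shared GPU scheduler. */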
struct amd_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_sched_prepare_job,
	.run_job = amdgpu_sched_run_job,
	.process_job = amdgpu_sched_process_job
};
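
/*
 * amdgpu_sched_ib_submit_kernel_helper - submit kernel-owned IBs
 *
 * When amdgpu_enable_scheduler is set, wraps @ibs in a parser job, pushes
 * it to the ring's scheduler and waits for the emit; otherwise the IBs
 * are scheduled directly. On success *@f points at the fence of the
 * last IB.
 */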
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_cs_parser *),
					 void *owner,
					 struct fence **f)
{
	int r = 0;

	if (amdgpu_enable_scheduler) {
		uint64_t v_seq;
		struct amdgpu_cs_parser *sched_job =
			amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
						ibs, num_ibs);
		if (!sched_job)
			return -ENOMEM;
		sched_job->free_job = free_job;
		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
		ibs[num_ibs - 1].sequence = v_seq;
		amd_sched_push_job(ring->scheduler,
				   &adev->kernel_ctx.rings[ring->idx].c_entity,
				   sched_job);
		r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
					v_seq, false, -1);
		if (r)
			WARN(true, "emit timeout\n");
	} else
		r = amdgpu_ib_schedule(adev, 1, ibs, owner);
	if (r)
		return r;
	*f = &ibs[num_ibs - 1].fence->base;
	return 0;
}
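
/*
 * Minimal usage sketch (illustrative only, not part of the driver): a
 * hypothetical caller with one prepared IB submits it through the helper
 * and waits on the returned fence. example_free_job and
 * example_submit_one_ib are made-up names; real callers pass a free_job
 * that releases the IB.
 *
 *	static int example_free_job(struct amdgpu_cs_parser *sched_job)
 *	{
 *		return 0;
 *	}
 *
 *	static int example_submit_one_ib(struct amdgpu_device *adev,
 *					 struct amdgpu_ring *ring,
 *					 struct amdgpu_ib *ib,
 *					 void *owner)
 *	{
 *		struct fence *f = NULL;
 *		int r;
 *
 *		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
 *							 example_free_job,
 *							 owner, &f);
 *		if (r)
 *			return r;
 *		return fence_wait(f, false);
 *	}
 */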