[linux.git] drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"

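/*
 * Backend callback: validate the job and run its optional prepare step.
 * On failure the parser's job_work is scheduled so cleanup happens
 * outside the scheduler thread.
 */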
static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
                                    struct amd_sched_entity *entity,
                                    struct amd_sched_job *job)
{
        int r = 0;
        struct amdgpu_cs_parser *sched_job;

        if (!job || !job->data) {
                DRM_ERROR("job is null\n");
                return -EINVAL;
        }

        sched_job = (struct amdgpu_cs_parser *)job->data;
        if (sched_job->prepare_job) {
                r = sched_job->prepare_job(sched_job);
                if (r) {
                        DRM_ERROR("Prepare job error\n");
                        schedule_work(&sched_job->job_work);
                }
        }
        return r;
}

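/*
 * Backend callback: submit the job's IBs to the hardware, take a
 * reference on the fence of the last IB and hand it back to the
 * scheduler, then emit the job's sequence number to the entity.
 * On error the parser's job_work is scheduled and NULL is returned.
 */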
static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
                                          struct amd_sched_entity *entity,
                                          struct amd_sched_job *job)
{
        int r = 0;
        struct amdgpu_cs_parser *sched_job;
        struct amdgpu_fence *fence;

        if (!job || !job->data) {
                DRM_ERROR("job is null\n");
                return NULL;
        }
        sched_job = (struct amdgpu_cs_parser *)job->data;
        mutex_lock(&sched_job->job_lock);
        r = amdgpu_ib_schedule(sched_job->adev,
                               sched_job->num_ibs,
                               sched_job->ibs,
                               sched_job->filp);
        if (r)
                goto err;
        fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);

        if (sched_job->run_job) {
                r = sched_job->run_job(sched_job);
                if (r)
                        goto err;
        }

        amd_sched_emit(entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);

        mutex_unlock(&sched_job->job_lock);
        return &fence->base;

err:
        DRM_ERROR("Run job error\n");
        mutex_unlock(&sched_job->job_lock);
        schedule_work(&sched_job->job_work);
        return NULL;
}

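/*
 * Backend callback: invoked once the scheduler has processed the job;
 * schedules the parser's job_work for deferred cleanup.
 */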
static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
                                     struct amd_sched_job *job)
{
        struct amdgpu_cs_parser *sched_job;

        if (!job || !job->data) {
                DRM_ERROR("job is null\n");
                return;
        }
        sched_job = (struct amdgpu_cs_parser *)job->data;
        schedule_work(&sched_job->job_work);
}

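/* Backend operations handed to the amd_gpu_scheduler core. */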
struct amd_sched_backend_ops amdgpu_sched_ops = {
        .prepare_job = amdgpu_sched_prepare_job,
        .run_job = amdgpu_sched_run_job,
        .process_job = amdgpu_sched_process_job
};

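/**
 * amdgpu_sched_ib_submit_kernel_helper - submit IBs from kernel context
 * @adev: amdgpu device the IBs belong to
 * @ring: ring to submit the IBs to
 * @ibs: array of indirect buffers to schedule
 * @num_ibs: number of entries in @ibs
 * @free_job: callback stored in the parser as its free_job hook
 * @owner: submission owner, passed through to the parser or amdgpu_ib_schedule()
 * @f: filled with the fence the caller can wait on
 *
 * With the GPU scheduler enabled the IBs are wrapped in a CS parser job and
 * pushed to the kernel context's entity for @ring; otherwise they are
 * scheduled directly with amdgpu_ib_schedule().
 *
 * Returns 0 on success or a negative error code.
 */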
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                         struct amdgpu_ring *ring,
                                         struct amdgpu_ib *ibs,
                                         unsigned num_ibs,
                                         int (*free_job)(struct amdgpu_cs_parser *),
                                         void *owner,
                                         struct fence **f)
{
        int r = 0;

        if (amdgpu_enable_scheduler) {
                struct amdgpu_cs_parser *sched_job =
                        amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
                                                ibs, num_ibs);
                if (!sched_job)
                        return -ENOMEM;

                sched_job->free_job = free_job;
                mutex_lock(&sched_job->job_lock);
                r = amd_sched_push_job(ring->scheduler,
                                       &adev->kernel_ctx.rings[ring->idx].entity,
                                       sched_job, &sched_job->s_fence);
                if (r) {
                        mutex_unlock(&sched_job->job_lock);
                        kfree(sched_job);
                        return r;
                }
                ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
                *f = &sched_job->s_fence->base;
                mutex_unlock(&sched_job->job_lock);
        } else {
                r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
                if (r)
                        return r;
                *f = &ibs[num_ibs - 1].fence->base;
        }
        return 0;
}