drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"

/*
 * Run the job's optional prepare callback; if it fails, schedule the job's
 * work item and return the error.
 */
static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
                                    struct amd_context_entity *c_entity,
                                    void *job)
{
        int r = 0;
        struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;

        if (sched_job->prepare_job) {
                r = sched_job->prepare_job(sched_job);
                if (r) {
                        DRM_ERROR("Prepare job error\n");
                        schedule_work(&sched_job->job_work);
                }
        }
        return r;
}

/* Fence callback: notify the scheduler that this job's fence has signaled. */
static void amdgpu_fence_sched_cb(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_job *sched_job =
                container_of(cb, struct amd_sched_job, cb);

        amd_sched_process_job(sched_job);
}

/*
 * Submit the job's IBs to the ring and register a callback on the fence of
 * the last IB; on any failure schedule the job's work item instead.
 */
static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
                                 struct amd_context_entity *c_entity,
                                 struct amd_sched_job *job)
{
        int r = 0;
        struct amdgpu_cs_parser *sched_job;
        struct amdgpu_fence *fence;

        if (!job || !job->job) {
                DRM_ERROR("job is null\n");
                return;
        }
        sched_job = (struct amdgpu_cs_parser *)job->job;
        mutex_lock(&sched_job->job_lock);
        r = amdgpu_ib_schedule(sched_job->adev,
                               sched_job->num_ibs,
                               sched_job->ibs,
                               sched_job->filp);
        if (r)
                goto err;
        fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
        if (fence_add_callback(&fence->base,
                               &job->cb, amdgpu_fence_sched_cb)) {
                DRM_ERROR("fence add callback failed\n");
                goto err;
        }

        if (sched_job->run_job) {
                r = sched_job->run_job(sched_job);
                if (r)
                        goto err;
        }

        amd_sched_emit(c_entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);

        mutex_unlock(&sched_job->job_lock);
        return;
err:
        DRM_ERROR("Run job error\n");
        mutex_unlock(&sched_job->job_lock);
        schedule_work(&sched_job->job_work);
}

/* Process a completed job by scheduling its work item. */
static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
{
        struct amdgpu_cs_parser *sched_job = NULL;
        struct amdgpu_fence *fence = NULL;
        struct amdgpu_ring *ring = NULL;
        struct amdgpu_device *adev = NULL;

        if (!job)
                return;
        sched_job = (struct amdgpu_cs_parser *)job;
        fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
        if (!fence)
                return;
        ring = fence->ring;
        adev = ring->adev;

        schedule_work(&sched_job->job_work);
}

struct amd_sched_backend_ops amdgpu_sched_ops = {
        .prepare_job = amdgpu_sched_prepare_job,
        .run_job = amdgpu_sched_run_job,
        .process_job = amdgpu_sched_process_job
};

/*
 * Submit IBs from a kernel context.  When the GPU scheduler is enabled, a CS
 * parser job is created, pushed to the scheduler and waited on until it is
 * emitted; otherwise the IBs are scheduled directly.  On success the fence of
 * the last IB is returned through @f.
 */
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                         struct amdgpu_ring *ring,
                                         struct amdgpu_ib *ibs,
                                         unsigned num_ibs,
                                         int (*free_job)(struct amdgpu_cs_parser *),
                                         void *owner,
                                         struct fence **f)
{
        int r = 0;

        if (amdgpu_enable_scheduler) {
                uint64_t v_seq;
                struct amdgpu_cs_parser *sched_job =
                        amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
                                                ibs, 1);
                if (!sched_job)
                        return -ENOMEM;
                sched_job->free_job = free_job;
                v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
                ibs[num_ibs - 1].sequence = v_seq;
                amd_sched_push_job(ring->scheduler,
                                   &adev->kernel_ctx.rings[ring->idx].c_entity,
                                   sched_job);
                r = amd_sched_wait_emit(
                        &adev->kernel_ctx.rings[ring->idx].c_entity,
                        v_seq,
                        false,
                        -1);
                if (r)
                        WARN(true, "emit timeout\n");
        } else {
                r = amdgpu_ib_schedule(adev, 1, ibs, owner);
        }
        if (r)
                return r;
        *f = &ibs[num_ibs - 1].fence->base;
        return 0;
}