Git Repo - linux.git/blob - drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drm/amdgpu: add amd_sched_commit
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"

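/*
 * Backend callback: run the job's optional prepare_job hook before the
 * scheduler hands the job to the hardware.  On failure the job's cleanup
 * work is scheduled so its resources are still released.
 */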
static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
                                    struct amd_context_entity *c_entity,
                                    void *job)
{
        int r = 0;
        struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;

        if (sched_job->prepare_job) {
                r = sched_job->prepare_job(sched_job);
                if (r) {
                        DRM_ERROR("Prepare job error\n");
                        schedule_work(&sched_job->job_work);
                }
        }
        return r;
}

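/*
 * Backend callback: submit the job's IBs to the ring under the job lock,
 * run the optional run_job hook, then emit the last IB's sequence number
 * to the context entity.  Any failure schedules the job's cleanup work
 * instead.
 */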
static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
                                 struct amd_context_entity *c_entity,
                                 void *job)
{
        int r;
        struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;

        mutex_lock(&sched_job->job_lock);
        r = amdgpu_ib_schedule(sched_job->adev,
                               sched_job->num_ibs,
                               sched_job->ibs,
                               sched_job->filp);
        if (r)
                goto err;
        if (sched_job->run_job) {
                r = sched_job->run_job(sched_job);
                if (r)
                        goto err;
        }

        amd_sched_emit(c_entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);

        mutex_unlock(&sched_job->job_lock);
        return;
err:
        DRM_ERROR("Run job error\n");
        mutex_unlock(&sched_job->job_lock);
        schedule_work(&sched_job->job_work);
}

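/*
 * Backend callback: invoked when the scheduler processes a finished job.
 * Once the fence of the last IB is known to exist, the job's cleanup work
 * is scheduled.
 */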
static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
{
        struct amdgpu_cs_parser *sched_job;
        struct amdgpu_fence *fence;

        if (!job)
                return;
        sched_job = (struct amdgpu_cs_parser *)job;
        fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
        if (!fence)
                return;

        schedule_work(&sched_job->job_work);
}

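/* Entry points the GPU scheduler core uses to drive amdgpu jobs. */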
struct amd_sched_backend_ops amdgpu_sched_ops = {
        .prepare_job = amdgpu_sched_prepare_job,
        .run_job = amdgpu_sched_run_job,
        .process_job = amdgpu_sched_process_job,
};

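/*
 * Submit IBs from kernel-internal code.  With the scheduler enabled the
 * IBs are wrapped in a CS job and pushed to the kernel context's entity
 * for this ring; otherwise they are scheduled on the ring directly.  On
 * success *f points to the fence of the last IB.
 */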
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                         struct amdgpu_ring *ring,
                                         struct amdgpu_ib *ibs,
                                         unsigned num_ibs,
                                         int (*free_job)(struct amdgpu_cs_parser *),
                                         void *owner,
                                         struct fence **f)
{
        int r = 0;

        if (amdgpu_enable_scheduler) {
                struct amdgpu_cs_parser *sched_job =
                        amdgpu_cs_parser_create(adev,
                                                owner,
                                                adev->kernel_ctx,
                                                ibs, num_ibs);
                if (!sched_job)
                        return -ENOMEM;
                sched_job->free_job = free_job;
                ibs[num_ibs - 1].sequence = amd_sched_push_job(ring->scheduler,
                                   &adev->kernel_ctx->rings[ring->idx].c_entity,
                                   sched_job);
                r = amd_sched_wait_emit(
                        &adev->kernel_ctx->rings[ring->idx].c_entity,
                        ibs[num_ibs - 1].sequence, false, -1);
                if (r)
                        WARN(true, "emit timeout\n");
        } else {
                r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
        }
        if (r)
                return r;
        *f = &ibs[num_ibs - 1].fence->base;
        return 0;
}
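
/*
 * Illustrative sketch (not part of the original file): a kernel-internal
 * caller with a single prepared IB could use the helper like this, where
 * free_job_cb is a hypothetical cleanup callback supplied by the caller:
 *
 *      struct fence *f;
 *      int r;
 *
 *      r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1,
 *                                               free_job_cb, owner, &f);
 *      if (!r)
 *              r = fence_wait(f, false);
 */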