linux.git/blob: drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
commit: drm/amdgpu: fix null pointer by previous cleanup

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"

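/*
 * Backend callback: invoked by the scheduler before a job is run.  Gives
 * the submission a chance to do late setup via its prepare_job hook; on
 * failure the job is handed to its cleanup work item instead of running.
 */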
static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
				    struct amd_context_entity *c_entity,
				    void *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;

	if (sched_job->prepare_job)
		r = sched_job->prepare_job(sched_job);
	if (r) {
		DRM_ERROR("Prepare job error\n");
		schedule_work(&sched_job->job_work);
	}
	return r;
}

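/*
 * Backend callback: submits the job's IBs to the hardware ring under the
 * job lock, runs the optional run_job hook, then publishes the emitted
 * sequence number and wakes waiters in amd_sched_wait_emit().  On any
 * error the job is routed to its cleanup work item instead.
 */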
static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
				 struct amd_context_entity *c_entity,
				 void *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;

	mutex_lock(&sched_job->job_lock);
	r = amdgpu_ib_schedule(sched_job->adev,
			       sched_job->num_ibs,
			       sched_job->ibs,
			       sched_job->filp);
	if (r)
		goto err;
	if (sched_job->run_job) {
		r = sched_job->run_job(sched_job);
		if (r)
			goto err;
	}
	atomic64_set(&c_entity->last_emitted_v_seq,
		     sched_job->ibs[sched_job->num_ibs - 1].sequence);
	wake_up_all(&c_entity->wait_emit);

	mutex_unlock(&sched_job->job_lock);
	return;

err:
	DRM_ERROR("Run job error\n");
	mutex_unlock(&sched_job->job_lock);
	schedule_work(&sched_job->job_work);
}

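/*
 * Backend callback: called once the hardware has processed a job.  The
 * fence of the last IB may already have been torn down by an earlier
 * cleanup (the null pointer this commit fixes), so bail out if it is
 * NULL before touching the ring; otherwise queue the cleanup work.
 */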
static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
{
	struct amdgpu_cs_parser *sched_job = NULL;
	struct amdgpu_fence *fence = NULL;
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_device *adev = NULL;

	if (!job)
		return;
	sched_job = (struct amdgpu_cs_parser *)job;
	fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
	if (!fence)
		return;
	ring = fence->ring;
	adev = ring->adev;

	schedule_work(&sched_job->job_work);
}

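/* Backend callbacks hooked up to the shared amd GPU scheduler. */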
struct amd_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_sched_prepare_job,
	.run_job = amdgpu_sched_run_job,
	.process_job = amdgpu_sched_process_job,
};

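/*
 * amdgpu_sched_ib_submit_kernel_helper - submit kernel-owned IBs
 *
 * Pushes @num_ibs IBs onto @ring, either through the scheduler (when
 * amdgpu_enable_scheduler is set) or directly via amdgpu_ib_schedule(),
 * and returns the fence of the last IB in @f.  The scheduler path tags
 * the last IB with a fresh virtual sequence number and blocks until the
 * job has been emitted to the hardware.
 */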
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_cs_parser *),
					 void *owner,
					 struct fence **f)
{
	int r = 0;

	if (amdgpu_enable_scheduler) {
		uint64_t v_seq;
		struct amd_context_entity *c_entity =
			&adev->kernel_ctx->rings[ring->idx].c_entity;
		struct amdgpu_cs_parser *sched_job =
			amdgpu_cs_parser_create(adev, owner, adev->kernel_ctx,
						ibs, num_ibs);
		if (!sched_job)
			return -ENOMEM;

		sched_job->free_job = free_job;
		/* Tag the last IB with a fresh virtual sequence number so
		 * we can wait for the whole submission to be emitted. */
		v_seq = atomic64_inc_return(&c_entity->last_queued_v_seq);
		ibs[num_ibs - 1].sequence = v_seq;
		amd_sched_push_job(ring->scheduler, c_entity, sched_job);
		r = amd_sched_wait_emit(c_entity, v_seq, false, -1);
		if (r)
			WARN(true, "emit timeout\n");
	} else {
		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
	}
	if (r)
		return r;
	/* Hand back the fence of the last IB (assumed valid at this point). */
	*f = &ibs[num_ibs - 1].fence->base;
	return 0;
}
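
/*
 * Hypothetical usage sketch (not part of the original file): one way a
 * caller might push a single kernel-owned IB through the helper above and
 * wait on the returned fence.  example_submit_one_ib() and its no-op
 * free_job callback are illustrative names; only the helper itself,
 * fence_wait()/fence_put() and AMDGPU_FENCE_OWNER_UNDEFINED come from
 * the kernel of this era.
 */
static int example_free_job(struct amdgpu_cs_parser *sched_job)
{
	/* Nothing extra to tear down in this sketch. */
	return 0;
}

static int __maybe_unused example_submit_one_ib(struct amdgpu_device *adev,
						struct amdgpu_ring *ring,
						struct amdgpu_ib *ib)
{
	struct fence *f = NULL;
	int r;

	/* Submit one IB; the fence of the last (only) IB comes back in f. */
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 example_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		return r;

	/* Block (uninterruptibly) until the hardware signals the fence. */
	r = fence_wait(f, false);
	fence_put(f);
	return r;
}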