/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

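/**
 * amdgpu_job_timedout - scheduler timeout handler
 * @s_job: the job that exceeded its timeout
 *
 * Try a lightweight soft recovery of the ring first; if that is not
 * possible, fall back to a full GPU reset when the recovery policy
 * (amdgpu_device_should_recover_gpu()) allows it.
 */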
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);

        if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
                DRM_ERROR("ring %s timeout, but soft recovered\n",
                          s_job->sched->name);
                return;
        }

        DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
                  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
                  ring->fence_drv.sync_seq);

        if (amdgpu_device_should_recover_gpu(ring->adev))
                amdgpu_device_gpu_recover(ring->adev, job);
}

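/**
 * amdgpu_job_alloc - allocate a job with space for @num_ibs IBs
 * @adev: amdgpu device pointer
 * @num_ibs: number of IBs the job will contain, must be at least one
 * @job: resulting job, with the IB array placed directly after the struct
 * @vm: optional VM the job runs in, may be NULL
 *
 * Returns 0 on success, -EINVAL for zero IBs or -ENOMEM on allocation
 * failure.
 */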
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
        size_t size = sizeof(struct amdgpu_job);

        if (num_ibs == 0)
                return -EINVAL;

        size += sizeof(struct amdgpu_ib) * num_ibs;

        *job = kzalloc(size, GFP_KERNEL);
        if (!*job)
                return -ENOMEM;

        /*
         * Initialize the scheduler to at least some ring so that we always
         * have a pointer to adev.
         */
        (*job)->base.sched = &adev->rings[0]->sched;
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;

        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->sched_sync);
        (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

        return 0;
}

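/**
 * amdgpu_job_alloc_with_ib - allocate a job with a single IB
 * @adev: amdgpu device pointer
 * @size: size of the IB in bytes
 * @job: resulting job
 *
 * Convenience wrapper around amdgpu_job_alloc() that also allocates the
 * backing store for the IB. Returns 0 on success or a negative error code.
 */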
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job)
{
        int r;

        r = amdgpu_job_alloc(adev, 1, job, NULL);
        if (r)
                return r;

        r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
        if (r)
                kfree(*job);

        return r;
}

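/**
 * amdgpu_job_free_resources - free the IBs of a job
 * @job: job whose IB resources should be released
 *
 * The IB memory is released against the scheduler's finished fence when
 * one exists, otherwise against the hardware fence of the job.
 */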
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *f;
        unsigned i;

        /* use sched fence if available */
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

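/**
 * amdgpu_job_free_cb - scheduler free_job callback
 * @s_job: job the scheduler is done with
 *
 * Drops the ring priority reference and the fence and sync object
 * references still held by the job, then frees it.
 */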
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);

        drm_sched_job_cleanup(s_job);

        amdgpu_ring_priority_put(ring, s_job->s_priority);
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

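/**
 * amdgpu_job_free - free a job that was never pushed to the scheduler
 * @job: job to free
 *
 * Counterpart to amdgpu_job_free_cb() for jobs still owned by the caller,
 * e.g. on an error path before submission.
 */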
void amdgpu_job_free(struct amdgpu_job *job)
{
        amdgpu_job_free_resources(job);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

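/**
 * amdgpu_job_submit - push a job to the GPU scheduler
 * @job: job to submit
 * @entity: scheduler entity the job is queued on
 * @owner: owner identifying the submission
 * @f: filled with a reference to the scheduler's finished fence
 *
 * After this call the scheduler owns the job and the caller must not
 * touch it any more. Returns 0 on success or a negative error code.
 */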
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
                      void *owner, struct dma_fence **f)
{
        enum drm_sched_priority priority;
        struct amdgpu_ring *ring;
        int r;

        if (!f)
                return -EINVAL;

        r = drm_sched_job_init(&job->base, entity, owner);
        if (r)
                return r;

        job->owner = owner;
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);

        ring = to_amdgpu_ring(entity->rq->sched);
        amdgpu_ring_priority_get(ring, priority);

        return 0;
}

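/**
 * amdgpu_job_submit_direct - submit a job to a ring, bypassing the scheduler
 * @job: job to submit
 * @ring: ring the job's IBs are scheduled on
 * @fence: filled with the hardware fence of the submission
 *
 * Used for direct submission where going through the scheduler is
 * unnecessary. Returns 0 on success or a negative error code.
 */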
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
                             struct dma_fence **fence)
{
        int r;

        job->base.sched = &ring->sched;
        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
        if (r)
                return r;

        /* only take a reference once the submission produced a valid fence */
        job->fence = dma_fence_get(*fence);

        amdgpu_job_free(job);
        return 0;
}

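/**
 * amdgpu_job_dependency - scheduler callback returning the next dependency
 * @sched_job: job to get the dependency for
 * @s_entity: entity the job belongs to
 *
 * Returns the next fence the job must wait on before it can run, or NULL
 * once all dependencies are satisfied. Grabs a VM ID, which may add more
 * fences to wait for, after the explicit dependencies have been handled.
 */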
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                               struct drm_sched_entity *s_entity)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
        struct dma_fence *fence;
        bool explicit = false;
        int r;

        fence = amdgpu_sync_get_fence(&job->sync, &explicit);
        if (fence && explicit) {
                if (drm_sched_dependency_optimized(fence, s_entity)) {
                        r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
                                              fence, false);
                        if (r)
                                DRM_ERROR("Error adding fence (%d)\n", r);
                }
        }

        while (fence == NULL && vm && !job->vmid) {
                r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                     &job->base.s_fence->finished,
                                     job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);

                fence = amdgpu_sync_get_fence(&job->sync, NULL);
        }

        return fence;
}

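/**
 * amdgpu_job_run - scheduler callback that actually runs the job
 * @sched_job: job to run
 *
 * Schedules the job's IBs on the ring, unless VRAM was lost since the job
 * was created or an error is already set on the finished fence, in which
 * case the IBs are skipped. Returns the hardware fence of the submission.
 */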
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_job *job;
        int r;

        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;

        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

        trace_amdgpu_sched_run_job(job);

        /* skip the IBs as well if VRAM was lost in the meantime */
        if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
                dma_fence_set_error(finished, -ECANCELED);

        if (finished->error < 0) {
                DRM_INFO("Skip scheduling IBs!\n");
        } else {
                r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                       &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }
        /* if the GPU was reset, the hw fence is replaced here */
        dma_fence_put(job->fence);
        job->fence = dma_fence_get(fence);

        amdgpu_job_free_resources(job);
        return fence;
}

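/* Entry points the GPU scheduler uses to drive amdgpu jobs. */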
const struct drm_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
};