/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

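/*
 * Timeout handler called by the DRM scheduler when a job has been running
 * for too long.  Tries a lightweight soft recovery of the hung ring first
 * and escalates to a full GPU reset only when the device should recover.
 */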
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);
        struct amdgpu_task_info ti;
        struct amdgpu_device *adev = ring->adev;
        int idx;
        int r;

        if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
                DRM_INFO("%s - device unplugged, skipping recovery on scheduler:%s\n",
                         __func__, s_job->sched->name);

                /* Effectively the job is aborted as the device is gone */
                return DRM_GPU_SCHED_STAT_ENODEV;
        }

        memset(&ti, 0, sizeof(struct amdgpu_task_info));
        adev->job_hang = true;

        if (amdgpu_gpu_recovery &&
            amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
                DRM_ERROR("ring %s timeout, but soft recovered\n",
                          s_job->sched->name);
                goto exit;
        }

        amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
        DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
                  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
                  ring->fence_drv.sync_seq);
        DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
                  ti.process_name, ti.tgid, ti.task_name, ti.pid);

        if (amdgpu_device_should_recover_gpu(ring->adev)) {
                struct amdgpu_reset_context reset_context;

                memset(&reset_context, 0, sizeof(reset_context));

                reset_context.method = AMD_RESET_METHOD_NONE;
                reset_context.reset_req_dev = adev;
                clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

                r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
                if (r)
                        DRM_ERROR("GPU Recovery Failed: %d\n", r);
        } else {
                drm_sched_suspend_timeout(&ring->sched);
                if (amdgpu_sriov_vf(adev))
                        adev->virt.tdr_debug = true;
        }

exit:
        adev->job_hang = false;
        drm_dev_exit(idx);
        return DRM_GPU_SCHED_STAT_NOMINAL;
}

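/*
 * Allocate a job with room for @num_ibs indirect buffers.  @vm may be
 * NULL for jobs that don't run inside a VM context.
 */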
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
        if (num_ibs == 0)
                return -EINVAL;

        *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
        if (!*job)
                return -ENOMEM;

        /*
         * Initialize the scheduler to at least some ring so that we always
         * have a pointer to adev.
         */
        (*job)->base.sched = &adev->rings[0]->sched;
        (*job)->vm = vm;

        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->sched_sync);
        (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

        return 0;
}

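/* Convenience wrapper: allocate a job with a single IB of @size bytes. */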
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                enum amdgpu_ib_pool_type pool_type,
                struct amdgpu_job **job)
{
        int r;

        r = amdgpu_job_alloc(adev, 1, job, NULL);
        if (r)
                return r;

        (*job)->num_ibs = 1;
        r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
        if (r)
                kfree(*job);

        return r;
}

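/*
 * Record the GPU offsets and sizes (in pages) of the optional GDS, GWS
 * and OA buffers in the job.  Any of the three may be NULL.
 */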
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
                              struct amdgpu_bo *gws, struct amdgpu_bo *oa)
{
        if (gds) {
                job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
                job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
        }
        if (gws) {
                job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
                job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
        }
        if (oa) {
                job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
                job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
        }
}

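/*
 * Free the job's IBs.  The scheduler's finished fence is used when
 * available so the IB memory is only reused once the job has completed.
 */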
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *f;
        unsigned i;

        /* use sched fence if available */
        f = job->base.s_fence ? &job->base.s_fence->finished : &job->hw_fence;
        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

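/* drm_sched free_job callback: clean up a job the scheduler is done with. */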
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
        struct amdgpu_job *job = to_amdgpu_job(s_job);

        drm_sched_job_cleanup(s_job);

        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);

        dma_fence_put(&job->hw_fence);
}

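/*
 * Make @leader the gang leader of @job by recording the leader's
 * scheduled fence as the job's gang_submit dependency.
 */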
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
                                struct amdgpu_job *leader)
{
        struct dma_fence *fence = &leader->base.s_fence->scheduled;

        WARN_ON(job->gang_submit);

        /*
         * Don't add a reference when we are the gang leader to avoid a
         * circular dependency.
         */
        if (job != leader)
                dma_fence_get(fence);
        job->gang_submit = fence;
}

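/* Free a job directly, without going through the scheduler's free_job path. */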
void amdgpu_job_free(struct amdgpu_job *job)
{
        amdgpu_job_free_resources(job);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        if (job->gang_submit != &job->base.s_fence->scheduled)
                dma_fence_put(job->gang_submit);

        if (!job->hw_fence.ops)
                kfree(job);
        else
                dma_fence_put(&job->hw_fence);
}

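/*
 * Initialize and arm the scheduler job, hand a reference to its finished
 * fence back to the caller and push the job to the entity for execution.
 */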
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
                      void *owner, struct dma_fence **f)
{
        int r;

        if (!f)
                return -EINVAL;

        r = drm_sched_job_init(&job->base, entity, owner);
        if (r)
                return r;

        drm_sched_job_arm(&job->base);

        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        drm_sched_entity_push_job(&job->base);

        return 0;
}

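/*
 * Schedule the job's IBs directly on the ring, bypassing the scheduler
 * queue, and free the job once it has been pushed to the hardware.
 */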
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
                             struct dma_fence **fence)
{
        int r;

        job->base.sched = &ring->sched;
        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);
        if (r)
                return r;

        amdgpu_job_free(job);
        return 0;
}

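/*
 * drm_sched dependency callback: return the next fence the job must wait
 * for.  Once all explicit dependencies are resolved this also grabs a
 * VMID and, for gang members, switches to the gang before running.
 */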
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                               struct drm_sched_entity *s_entity)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
        struct dma_fence *fence;
        int r;

        fence = amdgpu_sync_get_fence(&job->sync);
        if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
                r = amdgpu_sync_fence(&job->sched_sync, fence);
                if (r)
                        DRM_ERROR("Error adding fence (%d)\n", r);
        }

        while (fence == NULL && vm && !job->vmid) {
                r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                     &job->base.s_fence->finished,
                                     job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);

                fence = amdgpu_sync_get_fence(&job->sync);
        }

        if (!fence && job->gang_submit)
                fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);

        return fence;
}

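/*
 * drm_sched run_job callback: push the job's IBs to the hardware ring.
 * Jobs are cancelled with -ECANCELED when VRAM was lost since they were
 * allocated or when a gang submission would be resubmitted.
 */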
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_job *job;
        int r = 0;

        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;

        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

        trace_amdgpu_sched_run_job(job);

        /* Skip job if VRAM is lost and never resubmit gangs */
        if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter) ||
            (job->job_run_counter && job->gang_submit))
                dma_fence_set_error(finished, -ECANCELED);

        if (finished->error < 0) {
                DRM_INFO("Skip scheduling IBs!\n");
        } else {
                r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                       &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }

        job->job_run_counter++;
        amdgpu_job_free_resources(job);

        fence = r ? ERR_PTR(r) : fence;
        return fence;
}

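/* Resolve a node popped from an entity's job queue back to its drm_sched_job. */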
#define to_drm_sched_job(sched_job)             \
                container_of((sched_job), struct drm_sched_job, queue_node)

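/*
 * Force-complete every job on @sched: pop and signal all jobs still
 * queued in the run queues with -EHWPOISON, then signal the finished
 * fences of jobs already pushed to the hardware.
 */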
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_job *s_job;
        struct drm_sched_entity *s_entity = NULL;
        int i;

        /* Signal all jobs not yet scheduled */
        for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                struct drm_sched_rq *rq = &sched->sched_rq[i];

                spin_lock(&rq->lock);
                list_for_each_entry(s_entity, &rq->entities, list) {
                        while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
                                struct drm_sched_fence *s_fence = s_job->s_fence;

                                dma_fence_signal(&s_fence->scheduled);
                                dma_fence_set_error(&s_fence->finished, -EHWPOISON);
                                dma_fence_signal(&s_fence->finished);
                        }
                }
                spin_unlock(&rq->lock);
        }

        /* Signal all jobs already scheduled to HW */
        list_for_each_entry(s_job, &sched->pending_list, list) {
                struct drm_sched_fence *s_fence = s_job->s_fence;

                dma_fence_set_error(&s_fence->finished, -EHWPOISON);
                dma_fence_signal(&s_fence->finished);
        }
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
};