/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

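/*
 * Scheduler timeout callback: a job failed to complete in time. Report the
 * last signaled and last emitted fence sequence numbers and kick off GPU
 * recovery for the job's ring.
 */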
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);

        DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
                  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
                  ring->fence_drv.sync_seq);

        amdgpu_device_gpu_recover(ring->adev, job, false);
}

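/**
 * amdgpu_job_alloc - allocate a job and space for its IBs
 * @adev: amdgpu device the job will run on
 * @num_ibs: number of IBs the job will contain, must be non-zero
 * @job: resulting job, allocated in one block together with the IB array
 * @vm: VM the job operates on, may be NULL
 *
 * Returns 0 on success, -EINVAL for zero @num_ibs or -ENOMEM if the
 * allocation fails.
 */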
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
        size_t size = sizeof(struct amdgpu_job);

        if (num_ibs == 0)
                return -EINVAL;

        size += sizeof(struct amdgpu_ib) * num_ibs;

        *job = kzalloc(size, GFP_KERNEL);
        if (!*job)
                return -ENOMEM;

        /*
         * Initialize the scheduler to at least some ring so that we always
         * have a pointer to adev.
         */
        (*job)->base.sched = &adev->rings[0]->sched;
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;

        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->sched_sync);
        (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);

        return 0;
}

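/**
 * amdgpu_job_alloc_with_ib - allocate a job with a single IB
 * @adev: amdgpu device the job will run on
 * @size: size of the IB in bytes
 * @job: resulting job
 *
 * Convenience wrapper around amdgpu_job_alloc() for jobs without a VM that
 * need exactly one IB. Frees the job again if getting the IB fails.
 */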
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job)
{
        int r;

        r = amdgpu_job_alloc(adev, 1, job, NULL);
        if (r)
                return r;

        r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
        if (r)
                kfree(*job);
        else
                (*job)->vm_pd_addr = adev->gart.table_addr;

        return r;
}

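/**
 * amdgpu_job_free_resources - free the IBs of a job
 * @job: job to clean up
 *
 * Releases the job's IBs, keeping the backing memory alive until the job's
 * fence signals.
 */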
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *f;
        unsigned i;

        /* use sched fence if available */
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

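/*
 * Scheduler free callback: drop the ring priority reference taken at submit
 * time and release everything else the job still holds.
 */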
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);

        amdgpu_ring_priority_put(ring, s_job->s_priority);
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

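/**
 * amdgpu_job_free - free a job that was never pushed to the scheduler
 * @job: job to free
 */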
void amdgpu_job_free(struct amdgpu_job *job)
{
        amdgpu_job_free_resources(job);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

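/**
 * amdgpu_job_submit - push a job to the scheduler
 * @job: job to submit
 * @entity: scheduler entity to push the job to
 * @owner: owner used for dependency handling
 * @f: resulting scheduler fence, holds a reference for the caller
 *
 * After this call the scheduler owns the job and the caller must no longer
 * touch it. Returns 0 on success or a negative error code.
 */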
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
                      void *owner, struct dma_fence **f)
{
        enum drm_sched_priority priority;
        struct amdgpu_ring *ring;
        int r;

        if (!f)
                return -EINVAL;

        r = drm_sched_job_init(&job->base, entity, owner);
        if (r)
                return r;

        job->owner = owner;
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);

        ring = to_amdgpu_ring(entity->rq->sched);
        amdgpu_ring_priority_get(ring, priority);

        return 0;
}

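/**
 * amdgpu_job_submit_direct - submit a job directly to a ring
 * @job: job to submit
 * @ring: ring the IBs are scheduled on
 * @fence: resulting hardware fence
 *
 * Bypasses the scheduler and writes the job's IBs to the ring immediately.
 */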
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
                             struct dma_fence **fence)
{
        int r;

        job->base.sched = &ring->sched;
        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
        if (r)
                return r;

        /* Only take the fence reference after the IBs were actually
         * scheduled; *fence is not guaranteed to be valid on error.
         */
        job->fence = dma_fence_get(*fence);

        amdgpu_job_free(job);
        return 0;
}

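/*
 * Scheduler dependency callback: return the next fence the job has to wait
 * for before it can run, and grab a VM ID once all other dependencies are
 * resolved.
 */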
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                               struct drm_sched_entity *s_entity)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
        struct dma_fence *fence;
        bool explicit = false;
        int r;

        fence = amdgpu_sync_get_fence(&job->sync, &explicit);
        if (fence && explicit) {
                if (drm_sched_dependency_optimized(fence, s_entity)) {
                        r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
                                              fence, false);
                        if (r)
                                DRM_ERROR("Error adding fence (%d)\n", r);
                }
        }

        /* Once all other dependencies are resolved, grab a VM ID; that can
         * add more fences to wait for in turn.
         */
        while (fence == NULL && vm && !job->vmid) {
                r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                     &job->base.s_fence->finished,
                                     job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);

                fence = amdgpu_sync_get_fence(&job->sync, NULL);
        }

        return fence;
}

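/*
 * Scheduler run callback: write the job's IBs to the ring and return the
 * resulting hardware fence. The IBs are skipped when the job was already
 * canceled or when VRAM contents were lost since the job was created.
 */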
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_job *job;
        int r;

        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;

        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

        trace_amdgpu_sched_run_job(job);

        /* Skip the IBs as well if VRAM contents were lost */
        if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
                dma_fence_set_error(finished, -ECANCELED);

        if (finished->error < 0) {
                DRM_INFO("Skip scheduling IBs!\n");
        } else {
                r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                       &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }
        /* if gpu reset, hw fence will be replaced here */
        dma_fence_put(job->fence);
        job->fence = dma_fence_get(fence);

        amdgpu_job_free_resources(job);
        return fence;
}

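/* Backend operations the GPU scheduler uses to drive amdgpu jobs */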
const struct drm_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
};