drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <[email protected]>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"

#define to_amdgpu_ctx_entity(e) \
        container_of((e), struct amdgpu_ctx_entity, entity)

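/*
 * Number of scheduler entities a context exposes for each hardware IP
 * type; userspace selects one of them through the "ring" index of the
 * context and CS interfaces.
 */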
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
        [AMDGPU_HW_IP_GFX]      =       1,
        [AMDGPU_HW_IP_COMPUTE]  =       4,
        [AMDGPU_HW_IP_DMA]      =       2,
        [AMDGPU_HW_IP_UVD]      =       1,
        [AMDGPU_HW_IP_VCE]      =       1,
        [AMDGPU_HW_IP_UVD_ENC]  =       1,
        [AMDGPU_HW_IP_VCN_DEC]  =       1,
        [AMDGPU_HW_IP_VCN_ENC]  =       1,
        [AMDGPU_HW_IP_VCN_JPEG] =       1,
};

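/* Total number of entities allocated for a single context. */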
static int amdgpu_ctx_total_num_entities(void)
{
        unsigned i, num_entities = 0;

        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
                num_entities += amdgpu_ctx_num_entities[i];

        return num_entities;
}

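/*
 * Contexts above NORMAL priority are restricted: the caller must either
 * hold CAP_SYS_NICE or be the current DRM master.
 */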
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
                                      enum drm_sched_priority priority)
{
        /* NORMAL and below are accessible by everyone */
        if (priority <= DRM_SCHED_PRIORITY_NORMAL)
                return 0;

        if (capable(CAP_SYS_NICE))
                return 0;

        if (drm_is_current_master(filp))
                return 0;

        return -EACCES;
}

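/*
 * Initialize a context: one flat array of fence slots (amdgpu_sched_jobs
 * per entity) and one flat array of entities are allocated up front;
 * ctx->entities[ip] then points at the first entity of each IP type.
 * Every entity is initialized against the run queues of the rings that
 * back its IP, at the requested priority.
 */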
static int amdgpu_ctx_init(struct amdgpu_device *adev,
                           enum drm_sched_priority priority,
                           struct drm_file *filp,
                           struct amdgpu_ctx *ctx)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        unsigned i, j;
        int r;

        if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
                return -EINVAL;

        r = amdgpu_ctx_priority_permit(filp, priority);
        if (r)
                return r;

        memset(ctx, 0, sizeof(*ctx));
        ctx->adev = adev;

        ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
                              sizeof(struct dma_fence*), GFP_KERNEL);
        if (!ctx->fences)
                return -ENOMEM;

        ctx->entities[0] = kcalloc(num_entities,
                                   sizeof(struct amdgpu_ctx_entity),
                                   GFP_KERNEL);
        if (!ctx->entities[0]) {
                r = -ENOMEM;
                goto error_free_fences;
        }

        for (i = 0; i < num_entities; ++i) {
                struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

                entity->sequence = 1;
                entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
        }
        for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
                ctx->entities[i] = ctx->entities[i - 1] +
                        amdgpu_ctx_num_entities[i - 1];

        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
        mutex_init(&ctx->lock);

        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
        ctx->reset_counter_query = ctx->reset_counter;
        ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        ctx->init_priority = priority;
        ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
                struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
                struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
                unsigned num_rings;

                switch (i) {
                case AMDGPU_HW_IP_GFX:
                        rings[0] = &adev->gfx.gfx_ring[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_COMPUTE:
                        for (j = 0; j < adev->gfx.num_compute_rings; ++j)
                                rings[j] = &adev->gfx.compute_ring[j];
                        num_rings = adev->gfx.num_compute_rings;
                        break;
                case AMDGPU_HW_IP_DMA:
                        for (j = 0; j < adev->sdma.num_instances; ++j)
                                rings[j] = &adev->sdma.instance[j].ring;
                        num_rings = adev->sdma.num_instances;
                        break;
                case AMDGPU_HW_IP_UVD:
                        rings[0] = &adev->uvd.inst[0].ring;
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCE:
                        rings[0] = &adev->vce.ring[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_UVD_ENC:
                        rings[0] = &adev->uvd.inst[0].ring_enc[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_DEC:
                        rings[0] = &adev->vcn.ring_dec;
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_ENC:
                        rings[0] = &adev->vcn.ring_enc[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_JPEG:
                        rings[0] = &adev->vcn.ring_jpeg;
                        num_rings = 1;
                        break;
                }

                for (j = 0; j < num_rings; ++j)
                        rqs[j] = &rings[j]->sched.sched_rq[priority];

                for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
                        r = drm_sched_entity_init(&ctx->entities[i][j].entity,
                                                  rqs, num_rings, &ctx->guilty);
                        if (r)
                                goto error_cleanup_entities;
                }
        }

        return 0;

error_cleanup_entities:
        for (i = 0; i < num_entities; ++i)
                drm_sched_entity_destroy(&ctx->entities[0][i].entity);
        kfree(ctx->entities[0]);

error_free_fences:
        kfree(ctx->fences);
        ctx->fences = NULL;
        return r;
}

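/*
 * Final kref release: drop every fence still cached in the entities'
 * ring buffers and free the context. The scheduler entities themselves
 * are expected to have been destroyed or finished beforehand.
 */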
static void amdgpu_ctx_fini(struct kref *ref)
{
        struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        struct amdgpu_device *adev = ctx->adev;
        unsigned i, j;

        if (!adev)
                return;

        for (i = 0; i < num_entities; ++i)
                for (j = 0; j < amdgpu_sched_jobs; ++j)
                        dma_fence_put(ctx->entities[0][i].fences[j]);
        kfree(ctx->fences);
        kfree(ctx->entities[0]);

        mutex_destroy(&ctx->lock);

        kfree(ctx);
}

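/*
 * Translate the (hw_ip, instance, ring) triple coming from userspace
 * into the scheduler entity that submissions should be pushed to.
 */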
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
                          u32 ring, struct drm_sched_entity **entity)
{
        if (hw_ip >= AMDGPU_HW_IP_NUM) {
                DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
                return -EINVAL;
        }

        /* Right now all IPs have only one instance - multiple rings. */
        if (instance != 0) {
                DRM_DEBUG("invalid ip instance: %d\n", instance);
                return -EINVAL;
        }

        if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
                DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
                return -EINVAL;
        }

        *entity = &ctx->entities[hw_ip][ring].entity;
        return 0;
}

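/*
 * Create a context and publish it in the file-private IDR. Handles
 * start at 1; id 0 is kept as the invalid/sentinel handle.
 */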
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
                            struct drm_file *filp,
                            enum drm_sched_priority priority,
                            uint32_t *id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;
        int r;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        mutex_lock(&mgr->lock);
        r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
        if (r < 0) {
                mutex_unlock(&mgr->lock);
                kfree(ctx);
                return r;
        }

        *id = (uint32_t)r;
        r = amdgpu_ctx_init(adev, priority, filp, ctx);
        if (r) {
                idr_remove(&mgr->ctx_handles, *id);
                *id = 0;
                kfree(ctx);
        }
        mutex_unlock(&mgr->lock);
        return r;
}

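/*
 * kref release for contexts dropped through the normal put path:
 * destroy all scheduler entities first, then free the context.
 */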
static void amdgpu_ctx_do_release(struct kref *ref)
{
        struct amdgpu_ctx *ctx;
        unsigned num_entities;
        u32 i;

        ctx = container_of(ref, struct amdgpu_ctx, refcount);

        num_entities = amdgpu_ctx_total_num_entities();
        for (i = 0; i < num_entities; i++)
                drm_sched_entity_destroy(&ctx->entities[0][i].entity);

        amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;

        mutex_lock(&mgr->lock);
        ctx = idr_remove(&mgr->ctx_handles, id);
        if (ctx)
                kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        mutex_unlock(&mgr->lock);
        return ctx ? 0 : -EINVAL;
}

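/*
 * Legacy AMDGPU_CTX_OP_QUERY_STATE: reports whether a GPU reset happened
 * since the last query on this context, then rearms the check.
 */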
static int amdgpu_ctx_query(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv, uint32_t id,
                            union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        unsigned reset_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        /* TODO: these two are always zero */
        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        /* determine if a GPU reset has occurred since the last call */
        reset_counter = atomic_read(&adev->gpu_reset_counter);
        /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
        if (ctx->reset_counter_query == reset_counter)
                out->state.reset_status = AMDGPU_CTX_NO_RESET;
        else
                out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
        ctx->reset_counter_query = reset_counter;

        mutex_unlock(&mgr->lock);
        return 0;
}

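/*
 * AMDGPU_CTX_OP_QUERY_STATE2: reports reset, VRAM-lost and guilty status
 * as flags, compared against the counters snapshotted at context init.
 */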
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
        struct amdgpu_fpriv *fpriv, uint32_t id,
        union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

        if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

        if (atomic_read(&ctx->guilty))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

        mutex_unlock(&mgr->lock);
        return 0;
}

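/* Top-level dispatcher for the DRM_AMDGPU_CTX ioctl. */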
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp)
{
        int r;
        uint32_t id;
        enum drm_sched_priority priority;

        union drm_amdgpu_ctx *args = data;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        r = 0;
        id = args->in.ctx_id;
        priority = amdgpu_to_sched_priority(args->in.priority);

        /* For backwards compatibility reasons, we need to accept
         * ioctls with garbage in the priority field */
        if (priority == DRM_SCHED_PRIORITY_INVALID)
                priority = DRM_SCHED_PRIORITY_NORMAL;

        switch (args->in.op) {
        case AMDGPU_CTX_OP_ALLOC_CTX:
                r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
                args->out.alloc.ctx_id = id;
                break;
        case AMDGPU_CTX_OP_FREE_CTX:
                r = amdgpu_ctx_free(fpriv, id);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE:
                r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE2:
                r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
                break;
        default:
                return -EINVAL;
        }

        return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;

        if (!fpriv)
                return NULL;

        mgr = &fpriv->ctx_mgr;

        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (ctx)
                kref_get(&ctx->refcount);
        mutex_unlock(&mgr->lock);
        return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
        if (ctx == NULL)
                return -EINVAL;

        kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        return 0;
}

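/*
 * Remember a scheduled fence in the entity's fence ring buffer and
 * return its sequence number through @handle. The buffer holds
 * amdgpu_sched_jobs entries (a power of two), so the slot is the
 * sequence number masked with the buffer size; the fence evicted from
 * that slot must already have signaled, which
 * amdgpu_ctx_wait_prev_fence enforces at submission time.
 */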
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
                          struct drm_sched_entity *entity,
                          struct dma_fence *fence, uint64_t *handle)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        uint64_t seq = centity->sequence;
        struct dma_fence *other = NULL;
        unsigned idx = 0;

        idx = seq & (amdgpu_sched_jobs - 1);
        other = centity->fences[idx];
        if (other)
                BUG_ON(!dma_fence_is_signaled(other));

        dma_fence_get(fence);

        spin_lock(&ctx->ring_lock);
        centity->fences[idx] = fence;
        centity->sequence++;
        spin_unlock(&ctx->ring_lock);

        dma_fence_put(other);
        if (handle)
                *handle = seq;
}

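/*
 * Look up the fence for a sequence number on an entity: ~0ull means the
 * most recent submission, sequence numbers that have not been used yet
 * are an error, and ones already overwritten in the ring buffer return
 * NULL (the fence has long since signaled).
 */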
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                       struct drm_sched_entity *entity,
                                       uint64_t seq)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        struct dma_fence *fence;

        spin_lock(&ctx->ring_lock);

        if (seq == ~0ull)
                seq = centity->sequence - 1;

        if (seq >= centity->sequence) {
                spin_unlock(&ctx->ring_lock);
                return ERR_PTR(-EINVAL);
        }

        if (seq + amdgpu_sched_jobs < centity->sequence) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }

        fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
        spin_unlock(&ctx->ring_lock);

        return fence;
}

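/*
 * Apply a priority override to every entity of the context; UNSET falls
 * back to the priority the context was created with.
 */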
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
                                  enum drm_sched_priority priority)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        enum drm_sched_priority ctx_prio;
        unsigned i;

        ctx->override_priority = priority;

        ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
                        ctx->init_priority : ctx->override_priority;

        for (i = 0; i < num_entities; i++) {
                struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

                drm_sched_entity_set_priority(entity, ctx_prio);
        }
}

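/*
 * Wait for the fence in the ring-buffer slot that the next submission
 * will reuse. This throttles each entity to at most amdgpu_sched_jobs
 * submissions in flight and guarantees the invariant checked by the
 * BUG_ON in amdgpu_ctx_add_fence.
 */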
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
                               struct drm_sched_entity *entity)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
        struct dma_fence *other = centity->fences[idx];

        if (other) {
                signed long r;

                r = dma_fence_wait(other, true);
                if (r < 0) {
                        if (r != -ERESTARTSYS)
                                DRM_ERROR("Error (%ld) waiting for fence!\n", r);

                        return r;
                }
        }

        return 0;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
}

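/*
 * Flush all entities of all contexts of this file on close. The overall
 * wait budget (MAX_WAIT_SCHED_ENTITY_Q_EMPTY) is carried from one entity
 * to the next while their queued jobs drain.
 */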
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;
        long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

        idp = &mgr->ctx_handles;

        mutex_lock(&mgr->lock);
        idr_for_each_entry(idp, ctx, id) {
                if (!ctx->adev) {
                        mutex_unlock(&mgr->lock);
                        return;
                }

                for (i = 0; i < num_entities; i++) {
                        struct drm_sched_entity *entity;

                        entity = &ctx->entities[0][i].entity;
                        max_wait = drm_sched_entity_flush(entity, max_wait);
                }
        }
        mutex_unlock(&mgr->lock);
}

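/*
 * Tear down the scheduler entities of every context that is no longer
 * referenced; contexts still holding extra references are reported and
 * skipped.
 */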
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (!ctx->adev)
                        return;

                if (kref_read(&ctx->refcount) != 1) {
                        DRM_ERROR("ctx %p is still alive\n", ctx);
                        continue;
                }

                for (i = 0; i < num_entities; i++)
                        drm_sched_entity_fini(&ctx->entities[0][i].entity);
        }
}

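/*
 * Final manager teardown on file release: drop the manager's reference
 * on every remaining context, then destroy the IDR and the lock.
 */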
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id;

        amdgpu_ctx_mgr_entity_fini(mgr);

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
                        DRM_ERROR("ctx %p is still alive\n", ctx);
        }

        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->lock);
}