/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#define to_amdgpu_ctx_entity(e) \
        container_of((e), struct amdgpu_ctx_entity, entity)

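/* Number of scheduler entities a context creates for each hardware IP type */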
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
        [AMDGPU_HW_IP_GFX]      = 1,
        [AMDGPU_HW_IP_COMPUTE]  = 4,
        [AMDGPU_HW_IP_DMA]      = 2,
        [AMDGPU_HW_IP_UVD]      = 1,
        [AMDGPU_HW_IP_VCE]      = 1,
        [AMDGPU_HW_IP_UVD_ENC]  = 1,
        [AMDGPU_HW_IP_VCN_DEC]  = 1,
        [AMDGPU_HW_IP_VCN_ENC]  = 1,
        [AMDGPU_HW_IP_VCN_JPEG] = 1,
};

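/* Total number of entities in a context, summed over all hardware IP types */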
static int amdgpu_ctx_total_num_entities(void)
{
        unsigned i, num_entities = 0;

        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
                num_entities += amdgpu_ctx_num_entities[i];

        return num_entities;
}

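/* Check whether the caller may create a context at the requested priority */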
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
                                      enum drm_sched_priority priority)
{
        /* NORMAL and below are accessible by everyone */
        if (priority <= DRM_SCHED_PRIORITY_NORMAL)
                return 0;

        /* Elevated priorities need CAP_SYS_NICE or DRM master status */
        if (capable(CAP_SYS_NICE))
                return 0;

        if (drm_is_current_master(filp))
                return 0;

        return -EACCES;
}

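/*
 * Initialize a freshly allocated context: set up the per-entity fence rings
 * and create one drm_sched entity per ring exposed by each hardware IP.
 */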
static int amdgpu_ctx_init(struct amdgpu_device *adev,
                           enum drm_sched_priority priority,
                           struct drm_file *filp,
                           struct amdgpu_ctx *ctx)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        unsigned i, j;
        int r;

        if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
                return -EINVAL;

        r = amdgpu_ctx_priority_permit(filp, priority);
        if (r)
                return r;

        memset(ctx, 0, sizeof(*ctx));
        ctx->adev = adev;

        /* One fence slot per possible in-flight job, per entity */
        ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
                              sizeof(struct dma_fence*), GFP_KERNEL);
        if (!ctx->fences)
                return -ENOMEM;

        ctx->entities[0] = kcalloc(num_entities,
                                   sizeof(struct amdgpu_ctx_entity),
                                   GFP_KERNEL);
        if (!ctx->entities[0]) {
                r = -ENOMEM;
                goto error_free_fences;
        }

        for (i = 0; i < num_entities; ++i) {
                struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

                entity->sequence = 1;
                entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
        }
        for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
                ctx->entities[i] = ctx->entities[i - 1] +
                        amdgpu_ctx_num_entities[i - 1];

        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
        mutex_init(&ctx->lock);

        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
        ctx->reset_counter_query = ctx->reset_counter;
        ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        ctx->init_priority = priority;
        ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
                struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
                struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
                unsigned num_rings = 0;
                unsigned num_rqs = 0;

                switch (i) {
                case AMDGPU_HW_IP_GFX:
                        rings[0] = &adev->gfx.gfx_ring[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_COMPUTE:
                        for (j = 0; j < adev->gfx.num_compute_rings; ++j)
                                rings[j] = &adev->gfx.compute_ring[j];
                        num_rings = adev->gfx.num_compute_rings;
                        break;
                case AMDGPU_HW_IP_DMA:
                        for (j = 0; j < adev->sdma.num_instances; ++j)
                                rings[j] = &adev->sdma.instance[j].ring;
                        num_rings = adev->sdma.num_instances;
                        break;
                case AMDGPU_HW_IP_UVD:
                        rings[0] = &adev->uvd.inst[0].ring;
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCE:
                        rings[0] = &adev->vce.ring[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_UVD_ENC:
                        rings[0] = &adev->uvd.inst[0].ring_enc[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_DEC:
                        rings[0] = &adev->vcn.ring_dec;
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_ENC:
                        rings[0] = &adev->vcn.ring_enc[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_JPEG:
                        rings[0] = &adev->vcn.ring_jpeg;
                        num_rings = 1;
                        break;
                }

                /* Skip rings that were never initialized on this device */
                for (j = 0; j < num_rings; ++j) {
                        if (!rings[j]->adev)
                                continue;

                        rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
                }

                for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
                        r = drm_sched_entity_init(&ctx->entities[i][j].entity,
                                                  rqs, num_rqs, &ctx->guilty);
                if (r)
                        goto error_cleanup_entities;
        }

        return 0;

error_cleanup_entities:
        for (i = 0; i < num_entities; ++i)
                drm_sched_entity_destroy(&ctx->entities[0][i].entity);
        kfree(ctx->entities[0]);

error_free_fences:
        kfree(ctx->fences);
        ctx->fences = NULL;
        return r;
}

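/* Final teardown once the last reference is dropped: free fences and memory */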
static void amdgpu_ctx_fini(struct kref *ref)
{
        struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        struct amdgpu_device *adev = ctx->adev;
        unsigned i, j;

        if (!adev)
                return;

        for (i = 0; i < num_entities; ++i)
                for (j = 0; j < amdgpu_sched_jobs; ++j)
                        dma_fence_put(ctx->entities[0][i].fences[j]);
        kfree(ctx->fences);
        kfree(ctx->entities[0]);

        mutex_destroy(&ctx->lock);

        kfree(ctx);
}

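/* Map a userspace (hw_ip, instance, ring) triple to its scheduler entity */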
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
                          u32 ring, struct drm_sched_entity **entity)
{
        if (hw_ip >= AMDGPU_HW_IP_NUM) {
                DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
                return -EINVAL;
        }

        /* Right now all IPs have only one instance - multiple rings. */
        if (instance != 0) {
                DRM_DEBUG("invalid ip instance: %d\n", instance);
                return -EINVAL;
        }

        if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
                DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
                return -EINVAL;
        }

        *entity = &ctx->entities[hw_ip][ring].entity;
        return 0;
}

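/* Allocate a context, publish it in the file's handle IDR and initialize it */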
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
                            struct drm_file *filp,
                            enum drm_sched_priority priority,
                            uint32_t *id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;
        int r;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        mutex_lock(&mgr->lock);
        r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
        if (r < 0) {
                mutex_unlock(&mgr->lock);
                kfree(ctx);
                return r;
        }

        *id = (uint32_t)r;
        r = amdgpu_ctx_init(adev, priority, filp, ctx);
        if (r) {
                idr_remove(&mgr->ctx_handles, *id);
                *id = 0;
                kfree(ctx);
        }
        mutex_unlock(&mgr->lock);
        return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
        struct amdgpu_ctx *ctx;
        unsigned num_entities;
        u32 i;

        ctx = container_of(ref, struct amdgpu_ctx, refcount);

        num_entities = 0;
        for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
                num_entities += amdgpu_ctx_num_entities[i];

        for (i = 0; i < num_entities; i++)
                drm_sched_entity_destroy(&ctx->entities[0][i].entity);

        amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;

        mutex_lock(&mgr->lock);
        ctx = idr_remove(&mgr->ctx_handles, id);
        if (ctx)
                kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        mutex_unlock(&mgr->lock);
        return ctx ? 0 : -EINVAL;
}

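/* Legacy AMDGPU_CTX_OP_QUERY_STATE: report whether a GPU reset has occurred */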
static int amdgpu_ctx_query(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv, uint32_t id,
                            union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        unsigned reset_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        /* TODO: these two are always zero */
        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        /* determine if a GPU reset has occurred since the last call */
        reset_counter = atomic_read(&adev->gpu_reset_counter);
        /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
        if (ctx->reset_counter_query == reset_counter)
                out->state.reset_status = AMDGPU_CTX_NO_RESET;
        else
                out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
        ctx->reset_counter_query = reset_counter;

        mutex_unlock(&mgr->lock);
        return 0;
}

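/* AMDGPU_CTX_OP_QUERY_STATE2: report reset, VRAM-lost, guilt and RAS status as flags */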
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
                             struct amdgpu_fpriv *fpriv, uint32_t id,
                             union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        uint32_t ras_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

        if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

        if (atomic_read(&ctx->guilty))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

        /* query the uncorrectable (UE) error count */
        ras_counter = amdgpu_ras_query_error_count(adev, false);
        /* the RAS counters are monotonically increasing */
        if (ras_counter != ctx->ras_counter_ue) {
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
                ctx->ras_counter_ue = ras_counter;
        }

        /* query the correctable (CE) error count */
        ras_counter = amdgpu_ras_query_error_count(adev, true);
        if (ras_counter != ctx->ras_counter_ce) {
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
                ctx->ras_counter_ce = ras_counter;
        }

        mutex_unlock(&mgr->lock);
        return 0;
}

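/* DRM_IOCTL_AMDGPU_CTX entry point: dispatch alloc/free/query operations */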
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp)
{
        int r;
        uint32_t id;
        enum drm_sched_priority priority;

        union drm_amdgpu_ctx *args = data;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        r = 0;
        id = args->in.ctx_id;
        priority = amdgpu_to_sched_priority(args->in.priority);

        /* For backwards compatibility reasons, we need to accept
         * ioctls with garbage in the priority field */
        if (priority == DRM_SCHED_PRIORITY_INVALID)
                priority = DRM_SCHED_PRIORITY_NORMAL;

        switch (args->in.op) {
        case AMDGPU_CTX_OP_ALLOC_CTX:
                r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
                args->out.alloc.ctx_id = id;
                break;
        case AMDGPU_CTX_OP_FREE_CTX:
                r = amdgpu_ctx_free(fpriv, id);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE:
                r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE2:
                r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
                break;
        default:
                return -EINVAL;
        }

        return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;

        if (fpriv == NULL)
                return NULL;

        mgr = &fpriv->ctx_mgr;

        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (ctx)
                kref_get(&ctx->refcount);
        mutex_unlock(&mgr->lock);
        return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
        if (ctx == NULL)
                return -EINVAL;

        kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        return 0;
}

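/* Store a job's fence in the entity's ring buffer and hand back its sequence number */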
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
                          struct drm_sched_entity *entity,
                          struct dma_fence *fence, uint64_t* handle)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        uint64_t seq = centity->sequence;
        struct dma_fence *other = NULL;
        unsigned idx = 0;

        idx = seq & (amdgpu_sched_jobs - 1);
        other = centity->fences[idx];
        if (other)
                BUG_ON(!dma_fence_is_signaled(other));

        dma_fence_get(fence);

        spin_lock(&ctx->ring_lock);
        centity->fences[idx] = fence;
        centity->sequence++;
        spin_unlock(&ctx->ring_lock);

        dma_fence_put(other);
        if (handle)
                *handle = seq;
}

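/* Look up the fence for a given sequence number; ~0ull means the last submission */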
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                       struct drm_sched_entity *entity,
                                       uint64_t seq)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        struct dma_fence *fence;

        spin_lock(&ctx->ring_lock);

        if (seq == ~0ull)
                seq = centity->sequence - 1;

        if (seq >= centity->sequence) {
                spin_unlock(&ctx->ring_lock);
                return ERR_PTR(-EINVAL);
        }

        /* Fences older than the ring buffer depth have already signaled */
        if (seq + amdgpu_sched_jobs < centity->sequence) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }

        fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
        spin_unlock(&ctx->ring_lock);

        return fence;
}

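/* Apply a priority override (e.g. from the scheduler ioctl) to all entities */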
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
                                  enum drm_sched_priority priority)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        enum drm_sched_priority ctx_prio;
        unsigned i;

        ctx->override_priority = priority;

        ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
                        ctx->init_priority : ctx->override_priority;

        for (i = 0; i < num_entities; i++) {
                struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

                drm_sched_entity_set_priority(entity, ctx_prio);
        }
}

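/* Wait for the fence in the slot the next submission would overwrite */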
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
                               struct drm_sched_entity *entity)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
        struct dma_fence *other = centity->fences[idx];
        long r;

        if (other) {
                r = dma_fence_wait(other, true);
                if (r < 0) {
                        if (r != -ERESTARTSYS)
                                DRM_ERROR("Error (%ld) waiting for fence!\n", r);
                        return r;
                }
        }

        return 0;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
}

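/* Flush all entities of all contexts of this manager; returns the remaining timeout */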
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;

        idp = &mgr->ctx_handles;

        mutex_lock(&mgr->lock);
        idr_for_each_entry(idp, ctx, id) {
                for (i = 0; i < num_entities; i++) {
                        struct drm_sched_entity *entity;

                        entity = &ctx->entities[0][i].entity;
                        timeout = drm_sched_entity_flush(entity, timeout);
                }
        }
        mutex_unlock(&mgr->lock);
        return timeout;
}

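/* Finish all entities; contexts that still hold extra references are reported */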
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_read(&ctx->refcount) != 1) {
                        DRM_ERROR("ctx %p is still alive\n", ctx);
                        continue;
                }

                for (i = 0; i < num_entities; i++)
                        drm_sched_entity_fini(&ctx->entities[0][i].entity);
        }
}

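/* Final manager teardown: drop the last reference on every remaining context */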
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id;

        amdgpu_ctx_mgr_entity_fini(mgr);

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
                        DRM_ERROR("ctx %p is still alive\n", ctx);
        }

        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->lock);
}