/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

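/* Number of scheduler entities (rings) exposed to userspace per HW IP type. */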
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	= 1,
	[AMDGPU_HW_IP_COMPUTE]	= 4,
	[AMDGPU_HW_IP_DMA]	= 2,
	[AMDGPU_HW_IP_UVD]	= 1,
	[AMDGPU_HW_IP_VCE]	= 1,
	[AMDGPU_HW_IP_UVD_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_DEC]	= 1,
	[AMDGPU_HW_IP_VCN_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_JPEG]	= 1,
};

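/*
 * Reject priorities above NORMAL unless the caller has CAP_SYS_NICE or is
 * the current DRM master.
 */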
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
{
	switch (prio) {
	case DRM_SCHED_PRIORITY_HIGH_HW:
	case DRM_SCHED_PRIORITY_KERNEL:
		return AMDGPU_GFX_PIPE_PRIO_HIGH;
	default:
		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
	}
}

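/*
 * Map a scheduler priority to a hardware ring priority for the given HW IP,
 * falling back to the default priority when no scheduler is set up for the
 * requested level.
 */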
static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
						enum drm_sched_priority prio,
						u32 hw_ip)
{
	unsigned int hw_prio;

	hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
			amdgpu_ctx_sched_prio_to_compute_prio(prio) :
			AMDGPU_RING_PRIO_DEFAULT;
	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;

	return hw_prio;
}

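/*
 * Allocate and initialize the scheduler entity for one ring of a context,
 * picking the scheduler list that matches the context's effective priority.
 */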
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				  const u32 ring)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_ctx_entity *entity;
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	unsigned num_scheds = 0;
	unsigned int hw_prio;
	enum drm_sched_priority priority;
	int r;

	entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	entity->sequence = 1;
	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;

	if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}

	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	ctx->entities[hw_ip][ring] = entity;
	return 0;

error_free_entity:
	kfree(entity);
	return r;
}

static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	ctx->adev = adev;

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	return 0;
}

static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	int i;

	if (!entity)
		return;

	for (i = 0; i < amdgpu_sched_jobs; ++i)
		dma_fence_put(entity->fences[i]);

	kfree(entity);
}

static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			ctx->entities[i][j] = NULL;
		}
	}

	mutex_destroy(&ctx->lock);
	kfree(ctx);
}

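/*
 * Look up (and create on first use) the scheduler entity for the given
 * HW IP, instance and ring of a context.
 */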
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

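/*
 * Legacy query: reports whether a GPU reset happened since the last query by
 * comparing the per-context and global reset counters.
 */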
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned long ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query the uncorrectable error count; the RAS counter only increases */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query the correctable error count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

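/* DRM_AMDGPU_CTX ioctl entry point: allocate, free or query a context. */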
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field.
	 */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

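/*
 * Store the fence of a submitted job in the context's per-entity ring buffer
 * and hand back its sequence number as the user-visible handle.
 */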
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);

	*handle = seq;
}

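/*
 * Look up a previously submitted fence by sequence number; ~0ull means the
 * most recent one. Returns NULL if the fence is old enough to have left the
 * ring buffer window, or an error pointer for invalid sequence numbers.
 */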
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					   struct amdgpu_ctx_entity *aentity,
					   int hw_ip,
					   enum drm_sched_priority priority)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* set sw priority */
	drm_sched_entity_set_priority(&aentity->entity, priority);

	/* set hw priority */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
		hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
						      AMDGPU_HW_IP_COMPUTE);
		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}

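/*
 * Apply a priority override to every entity of a context; the override takes
 * precedence over the priority requested at creation time.
 */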
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	enum drm_sched_priority ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
						       i, ctx_prio);
		}
	}
}

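/*
 * Wait for the fence that currently occupies the ring buffer slot the next
 * submission will reuse, so the fence ring buffer never overflows.
 */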
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

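/*
 * Flush the entities of all contexts owned by this manager, giving their
 * pending jobs up to @timeout to complete.
 */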
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

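/*
 * Tear down the scheduler entities of all contexts that are no longer
 * referenced; contexts still holding extra references are reported.
 */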
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}