/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drm_auth.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

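/* Number of scheduler entities (i.e. rings) userspace can address per HW IP type */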
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	= 1,
	[AMDGPU_HW_IP_COMPUTE]	= 4,
	[AMDGPU_HW_IP_DMA]	= 2,
	[AMDGPU_HW_IP_UVD]	= 1,
	[AMDGPU_HW_IP_VCE]	= 1,
	[AMDGPU_HW_IP_UVD_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_DEC]	= 1,
	[AMDGPU_HW_IP_VCN_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_JPEG]	= 1,
};

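/* Check that a userspace supplied context priority is one of the
 * AMDGPU_CTX_PRIORITY_* values defined in the uAPI.
 */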
bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
	case AMDGPU_CTX_PRIORITY_VERY_LOW:
	case AMDGPU_CTX_PRIORITY_LOW:
	case AMDGPU_CTX_PRIORITY_NORMAL:
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return true;
	default:
		return false;
	}
}

static enum drm_sched_priority
amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
		return DRM_SCHED_PRIORITY_UNSET;

	case AMDGPU_CTX_PRIORITY_VERY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_NORMAL:
		return DRM_SCHED_PRIORITY_NORMAL;

	case AMDGPU_CTX_PRIORITY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	/* This should not happen as we sanitized userspace provided priority
	 * already, WARN if this happens.
	 */
	default:
		WARN(1, "Invalid context priority %d\n", ctx_prio);
		return DRM_SCHED_PRIORITY_NORMAL;
	}
}

static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      int32_t priority)
{
	if (!amdgpu_ctx_priority_is_valid(priority))
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_gfx_pipe_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_GFX_PIPE_PRIO_HIGH;
	default:
		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
	}
}

static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
		return AMDGPU_RING_PRIO_1;
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

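/* Map the effective context priority (the override if one is set, the init
 * priority otherwise) to a hardware ring priority level for the given HW IP,
 * falling back to the default level when no scheduler is configured for the
 * selected priority.
 */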
static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	int32_t ctx_prio;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_COMPUTE:
		hw_prio = amdgpu_ctx_prio_to_gfx_pipe_prio(ctx_prio);
		break;
	case AMDGPU_HW_IP_VCE:
	case AMDGPU_HW_IP_VCN_ENC:
		hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
		break;
	default:
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
		break;
	}

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;

	return hw_prio;
}

/* Calculate the time spent on the hw */
static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
{
	struct drm_sched_fence *s_fence;

	if (!fence)
		return ns_to_ktime(0);

	/* When the fence is not even scheduled it can't have spent any time */
	s_fence = to_drm_sched_fence(fence);
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
		return ns_to_ktime(0);

	/* When it is still running, account how much time was already spent */
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
		return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);

	return ktime_sub(s_fence->finished.timestamp,
			 s_fence->scheduled.timestamp);
}

static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
				      struct amdgpu_ctx_entity *centity)
{
	ktime_t res = ns_to_ktime(0);
	uint32_t i;

	spin_lock(&ctx->ring_lock);
	for (i = 0; i < amdgpu_sched_jobs; i++) {
		res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
	}
	spin_unlock(&ctx->ring_lock);
	return res;
}

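/* Lazily create the scheduler entity for a ring, picking the scheduler list
 * that matches the context's current hardware priority.
 */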
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				  const u32 ring)
{
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	struct amdgpu_device *adev = ctx->mgr->adev;
	struct amdgpu_ctx_entity *entity;
	enum drm_sched_priority drm_prio;
	unsigned int hw_prio, num_scheds;
	int32_t ctx_prio;
	int r;

	entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	entity->hw_ip = hw_ip;
	entity->sequence = 1;
	hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
	drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;

	/* disable load balance if the hw engine retains context among dependent jobs */
	if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
	    hw_ip == AMDGPU_HW_IP_VCN_DEC ||
	    hw_ip == AMDGPU_HW_IP_UVD_ENC ||
	    hw_ip == AMDGPU_HW_IP_UVD) {
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}

	r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	/* It's not an error if we fail to install the new entity */
	if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity))
		goto cleanup_entity;

	return 0;

cleanup_entity:
	drm_sched_entity_fini(&entity->entity);

error_free_entity:
	kfree(entity);

	return r;
}

static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	ktime_t res = ns_to_ktime(0);
	int i;

	if (!entity)
		return res;

	for (i = 0; i < amdgpu_sched_jobs; ++i) {
		res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
		dma_fence_put(entity->fences[i]);
	}

	kfree(entity);
	return res;
}

static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
			   struct drm_file *filp, struct amdgpu_ctx *ctx)
{
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	kref_init(&ctx->refcount);
	ctx->mgr = mgr;
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
	ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;

	return 0;
}

static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
					u32 *stable_pstate)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level current_level;

	current_level = amdgpu_dpm_get_performance_level(adev);

	switch (current_level) {
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
		break;
	default:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
		break;
	}
	return 0;
}

static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
					u32 stable_pstate)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level level;
	u32 current_stable_pstate;
	int r;

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
		r = -EBUSY;
		goto done;
	}

	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
	if (r || (stable_pstate == current_stable_pstate))
		goto done;

	switch (stable_pstate) {
	case AMDGPU_CTX_STABLE_PSTATE_NONE:
		level = AMD_DPM_FORCED_LEVEL_AUTO;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_PEAK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
		break;
	default:
		r = -EINVAL;
		goto done;
	}

	r = amdgpu_dpm_force_performance_level(adev, level);

	if (level == AMD_DPM_FORCED_LEVEL_AUTO)
		adev->pm.stable_pstate_ctx = NULL;
	else
		adev->pm.stable_pstate_ctx = ctx;
done:
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	return r;
}

static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_ctx_mgr *mgr = ctx->mgr;
	struct amdgpu_device *adev = mgr->adev;
	unsigned i, j, idx;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			ktime_t spend;

			spend = amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
		}
	}

	if (drm_dev_enter(&adev->ddev, &idx)) {
		amdgpu_ctx_set_stable_pstate(ctx, AMDGPU_CTX_STABLE_PSTATE_NONE);
		drm_dev_exit(idx);
	}

	mutex_destroy(&ctx->lock);
	kfree(ctx);
}

int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

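/* Allocate a new context and publish it under a handle in the per-file IDR. */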
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    int32_t priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(mgr, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

#define AMDGPU_RAS_COUNTE_DELAY_MS 3000

static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	if (adev->ras_enabled && con) {
		/* Return the cached values in O(1),
		 * and schedule delayed work to cache
		 * new values.
		 */
		int ce_count, ue_count;

		ce_count = atomic_read(&con->ras_ce_count);
		ue_count = atomic_read(&con->ras_ue_count);

		if (ce_count != ctx->ras_counter_ce) {
			ctx->ras_counter_ce = ce_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		}

		if (ue_count != ctx->ras_counter_ue) {
			ctx->ras_counter_ue = ue_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		}

		schedule_delayed_work(&con->ras_counte_delay_work,
				      msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
				    struct amdgpu_fpriv *fpriv, uint32_t id,
				    bool set, u32 *stable_pstate)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	int r;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	if (set)
		r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
	else
		r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);

	mutex_unlock(&mgr->lock);
	return r;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id, stable_pstate;
	int32_t priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	id = args->in.ctx_id;
	priority = args->in.priority;

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (!amdgpu_ctx_priority_is_valid(priority))
		priority = AMDGPU_CTX_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
		if (!r)
			args->out.pstate.flags = stable_pstate;
		break;
	case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
		if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
			return -EINVAL;
		stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
		if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

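/* Store the fence of a new submission in the entity's ring buffer of
 * amdgpu_sched_jobs fences and return the sequence number assigned to it.
 * The slot being recycled must already be signaled; the HW time of the
 * fence it replaces is folded into the manager's usage statistics.
 */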
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			      struct drm_sched_entity *entity,
			      struct dma_fence *fence)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	WARN_ON(other && !dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
		     &ctx->mgr->time_spend[centity->hw_ip]);

	dma_fence_put(other);
	return seq;
}

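/* Look up a previously added fence by sequence number, ~0ull meaning the
 * most recent one. Returns an ERR_PTR for sequence numbers that were never
 * submitted and NULL when the fence has already left the ring buffer.
 */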
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					   struct amdgpu_ctx_entity *aentity,
					   int hw_ip,
					   int32_t priority)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* set sw priority */
	drm_sched_entity_set_priority(&aentity->entity,
				      amdgpu_ctx_to_drm_sched_prio(priority));

	/* set hw priority */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE || hw_ip == AMDGPU_HW_IP_GFX) {
		hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}

void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  int32_t priority)
{
	int32_t ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
						       i, ctx_prio);
		}
	}
}

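/* Wait for the fence in the slot the next submission would overwrite, so a
 * context can never have more than amdgpu_sched_jobs fences in flight on a
 * single entity.
 */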
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
			 struct amdgpu_device *adev)
{
	unsigned int i;

	mgr->adev = adev;
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		atomic64_set(&mgr->time_spend[i], 0);
}

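/* Flush all entities of all contexts managed by mgr, giving in-flight jobs
 * up to timeout to drain and returning the remaining timeout.
 */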
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

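/* Accumulate the total HW time spent per HW IP type over all contexts of
 * mgr: the time already folded into mgr->time_spend plus the time of the
 * fences still tracked by the live entities.
 */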
void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
			  ktime_t usage[AMDGPU_HW_IP_NUM])
{
	struct amdgpu_ctx *ctx;
	unsigned int hw_ip, i;
	uint32_t id;

	/*
	 * This is a little bit racy because a ctx or a fence can be destroyed
	 * at the very moment we try to account it. But that is ok since
	 * exactly that case is explicitly allowed by the interface.
	 */
	mutex_lock(&mgr->lock);
	for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
		uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);

		usage[hw_ip] = ns_to_ktime(ns);
	}

	idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
		for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
			for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
				struct amdgpu_ctx_entity *centity;
				ktime_t spend;

				centity = ctx->entities[hw_ip][i];
				if (!centity)
					continue;
				spend = amdgpu_ctx_entity_time(ctx, centity);
				usage[hw_ip] = ktime_add(usage[hw_ip], spend);
			}
		}
	}
	mutex_unlock(&mgr->lock);
}