/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_auth.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

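/* Number of scheduler entities exposed to userspace per hardware IP type. */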
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

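/* Check whether a context priority value supplied by userspace is one of
 * the priorities defined in the amdgpu UAPI.
 */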
bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
	case AMDGPU_CTX_PRIORITY_VERY_LOW:
	case AMDGPU_CTX_PRIORITY_LOW:
	case AMDGPU_CTX_PRIORITY_NORMAL:
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return true;
	default:
		return false;
	}
}

static enum drm_sched_priority
amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
		return DRM_SCHED_PRIORITY_UNSET;

	case AMDGPU_CTX_PRIORITY_VERY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_NORMAL:
		return DRM_SCHED_PRIORITY_NORMAL;

	case AMDGPU_CTX_PRIORITY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	/* This should not happen as we sanitized userspace provided priority
	 * already, WARN if this happens.
	 */
	default:
		WARN(1, "Invalid context priority %d\n", ctx_prio);
		return DRM_SCHED_PRIORITY_NORMAL;
	}
}

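/* Priorities above NORMAL are privileged: they require CAP_SYS_NICE or
 * DRM master status.
 */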
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      int32_t priority)
{
	if (!amdgpu_ctx_priority_is_valid(priority))
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_GFX_PIPE_PRIO_HIGH;
	default:
		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
	}
}

static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
		return AMDGPU_RING_PRIO_1;
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

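/* Resolve the context's effective priority to a hardware ring priority,
 * falling back to the default level when no scheduler is configured at
 * the requested one.
 */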
static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
{
	struct amdgpu_device *adev = ctx->adev;
	int32_t ctx_prio;
	unsigned int hw_prio;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	switch (hw_ip) {
	case AMDGPU_HW_IP_COMPUTE:
		hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
		break;
	case AMDGPU_HW_IP_VCE:
	case AMDGPU_HW_IP_VCN_ENC:
		hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
		break;
	default:
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
		break;
	}

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;

	return hw_prio;
}

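/* Lazily create the scheduler entity for one ring of a hardware IP block
 * and attach it to the context.
 */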
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				  const u32 ring)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_ctx_entity *entity;
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	unsigned num_scheds = 0;
	int32_t ctx_prio;
	unsigned int hw_prio;
	enum drm_sched_priority drm_prio;
	int r;

	entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	entity->sequence = 1;
	hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
	drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;

	/* disable load balance if the hw engine retains context among dependent jobs */
	if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
	    hw_ip == AMDGPU_HW_IP_VCN_DEC ||
	    hw_ip == AMDGPU_HW_IP_UVD_ENC ||
	    hw_ip == AMDGPU_HW_IP_UVD) {
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}

	r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	/* It's not an error if we fail to install the new entity */
	if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity))
		goto cleanup_entity;

	return 0;

cleanup_entity:
	drm_sched_entity_fini(&entity->entity);

error_free_entity:
	kfree(entity);

	return r;
}

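/* Initialize a freshly allocated context: check the requested priority,
 * then set up reference counting, locks and the reset/VRAM-lost counters.
 */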
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   int32_t priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	ctx->adev = adev;

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
	ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;

	return 0;
}

static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	int i;

	if (!entity)
		return;

	for (i = 0; i < amdgpu_sched_jobs; ++i)
		dma_fence_put(entity->fences[i]);

	kfree(entity);
}

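/* Translate the currently forced DPM performance level back into the
 * UAPI stable-pstate value.
 */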
static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
					u32 *stable_pstate)
{
	struct amdgpu_device *adev = ctx->adev;
	enum amd_dpm_forced_level current_level;

	current_level = amdgpu_dpm_get_performance_level(adev);

	switch (current_level) {
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
		break;
	default:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
		break;
	}

	return 0;
}

static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
					u32 stable_pstate)
{
	struct amdgpu_device *adev = ctx->adev;
	enum amd_dpm_forced_level level;
	int r;

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	/* only one context may force a stable pstate at a time */
	if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
		r = -EBUSY;
		goto done;
	}

	switch (stable_pstate) {
	case AMDGPU_CTX_STABLE_PSTATE_NONE:
		level = AMD_DPM_FORCED_LEVEL_AUTO;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_PEAK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
		break;
	default:
		r = -EINVAL;
		goto done;
	}

	r = amdgpu_dpm_force_performance_level(adev, level);

	if (level == AMD_DPM_FORCED_LEVEL_AUTO)
		adev->pm.stable_pstate_ctx = NULL;
	else
		adev->pm.stable_pstate_ctx = ctx;
done:
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	return r;
}

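/* Final kref release: tear down all entities and drop any stable-pstate
 * request the context still holds.
 */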
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j, idx;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			ctx->entities[i][j] = NULL;
		}
	}

	if (drm_dev_enter(&adev->ddev, &idx)) {
		amdgpu_ctx_set_stable_pstate(ctx, AMDGPU_CTX_STABLE_PSTATE_NONE);
		drm_dev_exit(idx);
	}

	mutex_destroy(&ctx->lock);
	kfree(ctx);
}

int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;

	return 0;
}

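/* Allocate a context, publish it in the file's handle IDR and return the
 * new handle through @id.
 */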
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    int32_t priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);

	return 0;
}

#define AMDGPU_RAS_COUNTE_DELAY_MS 3000

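/* Extended state query: reports reset, VRAM-lost and guilty status as
 * well as RAS error-counter changes via out->state.flags.
 */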
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	if (adev->ras_enabled && con) {
		/* Return the cached values in O(1),
		 * and schedule delayed work to cache
		 * new values.
		 */
		int ce_count, ue_count;

		ce_count = atomic_read(&con->ras_ce_count);
		ue_count = atomic_read(&con->ras_ue_count);

		if (ce_count != ctx->ras_counter_ce) {
			ctx->ras_counter_ce = ce_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		}

		if (ue_count != ctx->ras_counter_ue) {
			ctx->ras_counter_ue = ue_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		}

		schedule_delayed_work(&con->ras_counte_delay_work,
				      msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
	}

	mutex_unlock(&mgr->lock);

	return 0;
}

static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
				    struct amdgpu_fpriv *fpriv, uint32_t id,
				    bool set, u32 *stable_pstate)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	int r;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	if (set)
		r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
	else
		r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);

	mutex_unlock(&mgr->lock);
	return r;
}

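/* DRM_AMDGPU_CTX ioctl entry point: dispatches context alloc, free,
 * state queries and stable-pstate get/set.
 */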
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id, stable_pstate;
	int32_t priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	id = args->in.ctx_id;
	priority = args->in.priority;

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (!amdgpu_ctx_priority_is_valid(priority))
		priority = AMDGPU_CTX_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
		if (!r)
			args->out.pstate.flags = stable_pstate;
		break;
	case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
		if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
			return -EINVAL;
		stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
		if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

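/* Look up a context by handle and take a reference for the caller. */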
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

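/* Store a job's finished fence in the entity's ring of amdgpu_sched_jobs
 * fences and return its sequence number through @handle.
 */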
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

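/* Apply a new priority to a single entity: always update the software
 * scheduler priority; for compute rings also move the entity to the
 * scheduler list matching the new hardware priority.
 */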
static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					   struct amdgpu_ctx_entity *aentity,
					   int hw_ip,
					   int32_t priority)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* set sw priority */
	drm_sched_entity_set_priority(&aentity->entity,
				      amdgpu_ctx_to_drm_sched_prio(priority));

	/* set hw priority */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
		hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}

void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  int32_t priority)
{
	int32_t ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
						       i, ctx_prio);
		}
	}
}

int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

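/* Per-file context manager: init, flush/fini of all entities, teardown. */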
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

static void amdgpu_ctx_fence_time(struct amdgpu_ctx *ctx,
		struct amdgpu_ctx_entity *centity, ktime_t *total, ktime_t *max)
{
	ktime_t now, t1;
	uint32_t i;

	*total = *max = 0;

	now = ktime_get();
	for (i = 0; i < amdgpu_sched_jobs; i++) {
		struct dma_fence *fence;
		struct drm_sched_fence *s_fence;

		spin_lock(&ctx->ring_lock);
		fence = dma_fence_get(centity->fences[i]);
		spin_unlock(&ctx->ring_lock);
		if (!fence)
			continue;
		s_fence = to_drm_sched_fence(fence);
		if (!dma_fence_is_signaled(&s_fence->scheduled)) {
			dma_fence_put(fence);
			continue;
		}
		t1 = s_fence->scheduled.timestamp;
		if (!ktime_before(t1, now)) {
			dma_fence_put(fence);
			continue;
		}
		if (dma_fence_is_signaled(&s_fence->finished) &&
			s_fence->finished.timestamp < now)
			*total += ktime_sub(s_fence->finished.timestamp, t1);
		else
			*total += ktime_sub(now, t1);
		t1 = ktime_sub(now, t1);
		dma_fence_put(fence);
		*max = max(t1, *max);
	}
}

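/* Accumulate, across all contexts, the run time of the fences in one
 * entity slot; the longest single fence time is returned via @elapsed.
 */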
ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip,
		uint32_t idx, uint64_t *elapsed)
{
	struct idr *idp;
	struct amdgpu_ctx *ctx;
	uint32_t id;
	struct amdgpu_ctx_entity *centity;
	ktime_t total = 0, max = 0;

	if (idx >= AMDGPU_MAX_ENTITY_NUM)
		return 0;

	idp = &mgr->ctx_handles;
	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		ktime_t ttotal, tmax;

		if (!ctx->entities[hwip][idx])
			continue;

		centity = ctx->entities[hwip][idx];
		amdgpu_ctx_fence_time(ctx, centity, &ttotal, &tmax);

		/* Harmonic mean approximation diverges for very small
		 * values. If ratio < 0.01% ignore
		 */
		if (AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(tmax, ttotal))
			continue;

		total = ktime_add(total, ttotal);
		max = ktime_after(tmax, max) ? tmax : max;
	}

	mutex_unlock(&mgr->lock);
	if (elapsed)
		*elapsed = max;

	return total;
}