/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)
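
/* Number of scheduler entities exposed per hardware IP type.  Compute and
 * DMA get several entities each; every other IP gets exactly one. */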
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};
static int amdgput_ctx_total_num_entities(void)
{
	unsigned i, num_entities = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		num_entities += amdgpu_ctx_num_entities[i];

	return num_entities;
}
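
/* Priorities above NORMAL are restricted to privileged clients: holders of
 * CAP_SYS_NICE or the current DRM master. */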
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}
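
/* Initialize a freshly allocated context: per-entity fence ring buffers plus
 * the scheduler entities listed in amdgpu_ctx_num_entities[], each bound to
 * the run queues of its hardware IP at the requested priority. */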
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned num_entities = amdgput_ctx_total_num_entities();
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	ctx->adev = adev;

	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	ctx->entities[0] = kcalloc(num_entities,
				   sizeof(struct amdgpu_ctx_entity),
				   GFP_KERNEL);
	if (!ctx->entities[0]) {
		r = -ENOMEM;
		goto error_free_fences;
	}

	for (i = 0; i < num_entities; ++i) {
		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

		entity->sequence = 1;
		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
	}
	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
		ctx->entities[i] = ctx->entities[i - 1] +
			amdgpu_ctx_num_entities[i - 1];
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
		struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
		unsigned num_rings = 0;

		switch (i) {
		case AMDGPU_HW_IP_GFX:
			rings[0] = &adev->gfx.gfx_ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			for (j = 0; j < adev->gfx.num_compute_rings; ++j)
				rings[j] = &adev->gfx.compute_ring[j];
			num_rings = adev->gfx.num_compute_rings;
			break;
		case AMDGPU_HW_IP_DMA:
			for (j = 0; j < adev->sdma.num_instances; ++j)
				rings[j] = &adev->sdma.instance[j].ring;
			num_rings = adev->sdma.num_instances;
			break;
		case AMDGPU_HW_IP_UVD:
			rings[0] = &adev->uvd.inst[0].ring;
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCE:
			rings[0] = &adev->vce.ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			rings[0] = &adev->uvd.inst[0].ring_enc[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			rings[0] = &adev->vcn.ring_dec;
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			rings[0] = &adev->vcn.ring_enc[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			rings[0] = &adev->vcn.ring_jpeg;
			num_rings = 1;
			break;
		}

		for (j = 0; j < num_rings; ++j)
			rqs[j] = &rings[j]->sched.sched_rq[priority];

		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
						  rqs, num_rings, &ctx->guilty);
		if (r)
			goto error_cleanup_entities;
	}

	return 0;

error_cleanup_entities:
	for (i = 0; i < num_entities; ++i)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
	kfree(ctx->entities[0]);

error_free_fences:
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	unsigned num_entities = amdgput_ctx_total_num_entities();
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < num_entities; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->entities[0][i].fences[j]);
	kfree(ctx->fences);
	kfree(ctx->entities[0]);

	mutex_destroy(&ctx->lock);
	kfree(ctx);
}
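
/* Translate the (hw_ip, instance, ring) triple coming from user space into
 * the scheduler entity that submissions should be queued to. */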
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	*entity = &ctx->entities[hw_ip][ring].entity;
	return 0;
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	unsigned num_entities = 0;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
		num_entities += amdgpu_ctx_num_entities[i];

	for (i = 0; i < num_entities; i++)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);

	amdgpu_ctx_fini(ref);
}
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	mutex_unlock(&mgr->lock);
	return 0;
}
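
/* DRM_AMDGPU_CTX ioctl entry point: dispatches context allocation, free and
 * the two query variants based on args->in.op. */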
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}
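
/* Store a submitted fence in the entity's ring buffer of the last
 * amdgpu_sched_jobs fences, indexed by its sequence number. */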
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);

	*handle = seq;
}
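
/* Look up a previously added fence by sequence number.  Returns NULL for
 * fences that have already left the ring buffer (and therefore signaled) and
 * ERR_PTR(-EINVAL) for sequence numbers that have not been submitted yet. */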
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}
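
/* Apply a priority override to every entity of the context; UNSET falls back
 * to the priority the context was created with. */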
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	unsigned num_entities = amdgput_ctx_total_num_entities();
	enum drm_sched_priority ctx_prio;
	unsigned i;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < num_entities; i++) {
		struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

		drm_sched_entity_set_priority(entity, ctx_prio);
	}
}
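
/* Wait for the fence occupying the ring buffer slot that the next submission
 * will reuse, i.e. the fence from amdgpu_sched_jobs submissions ago. */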
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = centity->fences[idx];
	long r;

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
		return r;
	}

	return 0;
}
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}
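
/* Let every entity of every context in the manager drain its queued jobs,
 * bounded overall by MAX_WAIT_SCHED_ENTITY_Q_EMPTY. */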
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgput_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;
	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		if (!ctx->adev) {
			mutex_unlock(&mgr->lock);
			return;
		}

		for (i = 0; i < num_entities; i++) {
			struct drm_sched_entity *entity;

			entity = &ctx->entities[0][i].entity;
			max_wait = drm_sched_entity_flush(entity, max_wait);
		}
	}
	mutex_unlock(&mgr->lock);
}
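
/* Tear down the scheduler entities of all contexts; contexts that still hold
 * extra references are reported and skipped. */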
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgput_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (!ctx->adev)
			return;

		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < num_entities; i++)
			drm_sched_entity_fini(&ctx->entities[0][i].entity);
	}
}
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}