/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "amdgpu.h"
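/*
 * amdgpu_ctx_init - initialize a newly allocated context
 *
 * Allocates amdgpu_sched_jobs fence slots for every possible ring and
 * creates a scheduler entity on each ring that actually exists; on any
 * entity init failure the already created entities are torn down again.
 */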
static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}
	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct amd_sched_rq *rq;

		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs);
		if (r)
			break;
	}

	/* unwind the entities created so far on partial failure */
	if (i < adev->num_rings) {
		for (j = 0; j < i; j++)
			amd_sched_entity_fini(&adev->rings[j]->sched,
					      &ctx->rings[j].entity);
		kfree(ctx->fences);
		ctx->fences = NULL;
		return r;
	}
	return 0;
}
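/*
 * amdgpu_ctx_fini - tear down a context
 *
 * Drops every fence still referenced in the per-ring slots, frees the
 * fence array and destroys the scheduler entities created in
 * amdgpu_ctx_init().
 */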
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);

	for (i = 0; i < adev->num_rings; i++)
		amd_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);
}
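/*
 * amdgpu_ctx_alloc - create a context and publish it in the file's IDR
 *
 * The new handle is returned through @id; on failure the IDR entry and
 * the context itself are cleaned up again.
 */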
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	amdgpu_ctx_fini(ctx);
	kfree(ctx);
}
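/*
 * amdgpu_ctx_free - drop the handle's reference on a context
 *
 * Removes the handle from the IDR and puts the reference it held; the
 * context is only destroyed once the last kref is gone.
 */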
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx) {
		idr_remove(&mgr->ctx_handles, id);
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		mutex_unlock(&mgr->lock);
		return 0;
	}
	mutex_unlock(&mgr->lock);
	return -EINVAL;
}
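/*
 * amdgpu_ctx_query - report context state to userspace
 *
 * Compares the context's saved reset counter against the device's
 * current one, so userspace learns whether a GPU reset has happened
 * since the previous query.
 */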
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}
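/*
 * amdgpu_ctx_ioctl - DRM_IOCTL_AMDGPU_CTX entry point
 *
 * Dispatches the alloc, free and query-state operations defined in
 * union drm_amdgpu_ctx.
 */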
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}
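/*
 * amdgpu_ctx_get - look up a context and take a reference
 *
 * The caller must balance a non-NULL return with amdgpu_ctx_put().
 */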
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}
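/*
 * amdgpu_ctx_add_fence - remember a submission's fence in the context
 *
 * Fences are kept in a per-ring ring buffer of amdgpu_sched_jobs
 * entries, indexed by the low bits of the sequence number. Before a
 * slot is reused, the fence still occupying it is waited on, so at
 * most amdgpu_sched_jobs submissions are in flight per ring. Returns
 * the sequence number assigned to @fence.
 */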
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = seq & (amdgpu_sched_jobs - 1);
	struct fence *other = cring->fences[idx];

	/* wait for the fence whose slot we are about to recycle */
	if (other) {
		signed long r = fence_wait_timeout(other, false,
						   MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);
	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);
	fence_put(other);

	return seq;
}
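/*
 * amdgpu_ctx_get_fence - look up the fence for a sequence number
 *
 * Returns ERR_PTR(-EINVAL) if @seq has not been submitted yet, NULL if
 * it already fell out of the fence window, or a new reference to the
 * stored fence otherwise.
 */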
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}
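/*
 * amdgpu_ctx_mgr_fini - destroy a file's context manager
 *
 * Any context still holding extra references at this point is leaked
 * somewhere, which is reported with a DRM_ERROR.
 */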
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}