/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <[email protected]>
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#define to_amdgpu_ctx_entity(e) \
        container_of((e), struct amdgpu_ctx_entity, entity)

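/*
 * Number of scheduler entities (rings) a context exposes for each HW IP
 * type; compute and DMA expose several so work can spread across rings.
 */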
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
        [AMDGPU_HW_IP_GFX]      =       1,
        [AMDGPU_HW_IP_COMPUTE]  =       4,
        [AMDGPU_HW_IP_DMA]      =       2,
        [AMDGPU_HW_IP_UVD]      =       1,
        [AMDGPU_HW_IP_VCE]      =       1,
        [AMDGPU_HW_IP_UVD_ENC]  =       1,
        [AMDGPU_HW_IP_VCN_DEC]  =       1,
        [AMDGPU_HW_IP_VCN_ENC]  =       1,
        [AMDGPU_HW_IP_VCN_JPEG] =       1,
};

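/* Total entities per context: the sum of amdgpu_ctx_num_entities[]. */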
static int amdgpu_ctx_total_num_entities(void)
{
        unsigned i, num_entities = 0;

        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
                num_entities += amdgpu_ctx_num_entities[i];

        return num_entities;
}

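/*
 * Priorities above NORMAL are restricted to CAP_SYS_NICE holders and the
 * current DRM master.
 */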
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
                                      enum drm_sched_priority priority)
{
        /* NORMAL and below are accessible by everyone */
        if (priority <= DRM_SCHED_PRIORITY_NORMAL)
                return 0;

        if (capable(CAP_SYS_NICE))
                return 0;

        if (drm_is_current_master(filp))
                return 0;

        return -EACCES;
}

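/*
 * Initialize a context: allocate the per-entity fence ring buffers,
 * select the scheduler list for each HW IP and create one
 * drm_sched_entity per exposed ring.
 */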
static int amdgpu_ctx_init(struct amdgpu_device *adev,
                           enum drm_sched_priority priority,
                           struct drm_file *filp,
                           struct amdgpu_ctx *ctx)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        unsigned i, j;
        int r;

        if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
                return -EINVAL;

        r = amdgpu_ctx_priority_permit(filp, priority);
        if (r)
                return r;

        memset(ctx, 0, sizeof(*ctx));
        ctx->adev = adev;

        ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
                              sizeof(struct dma_fence*), GFP_KERNEL);
        if (!ctx->fences)
                return -ENOMEM;

        ctx->entities[0] = kcalloc(num_entities,
                                   sizeof(struct amdgpu_ctx_entity),
                                   GFP_KERNEL);
        if (!ctx->entities[0]) {
                r = -ENOMEM;
                goto error_free_fences;
        }

        for (i = 0; i < num_entities; ++i) {
                struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

                entity->sequence = 1;
                entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
        }
        for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
                ctx->entities[i] = ctx->entities[i - 1] +
                        amdgpu_ctx_num_entities[i - 1];

        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
        mutex_init(&ctx->lock);

        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
        ctx->reset_counter_query = ctx->reset_counter;
        ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        ctx->init_priority = priority;
        ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
                struct drm_gpu_scheduler **scheds;
                struct drm_gpu_scheduler *sched;
                unsigned num_scheds = 0;

                switch (i) {
                case AMDGPU_HW_IP_GFX:
                        sched = &adev->gfx.gfx_ring[0].sched;
                        scheds = &sched;
                        num_scheds = 1;
                        break;
                case AMDGPU_HW_IP_COMPUTE:
                        scheds = adev->gfx.compute_sched;
                        num_scheds = adev->gfx.num_compute_sched;
                        break;
                case AMDGPU_HW_IP_DMA:
                        scheds = adev->sdma.sdma_sched;
                        num_scheds = adev->sdma.num_sdma_sched;
                        break;
                case AMDGPU_HW_IP_UVD:
                        sched = &adev->uvd.inst[0].ring.sched;
                        scheds = &sched;
                        num_scheds = 1;
                        break;
                case AMDGPU_HW_IP_VCE:
                        sched = &adev->vce.ring[0].sched;
                        scheds = &sched;
                        num_scheds = 1;
                        break;
                case AMDGPU_HW_IP_UVD_ENC:
                        sched = &adev->uvd.inst[0].ring_enc[0].sched;
                        scheds = &sched;
                        num_scheds = 1;
                        break;
                case AMDGPU_HW_IP_VCN_DEC:
                        scheds = adev->vcn.vcn_dec_sched;
                        num_scheds = adev->vcn.num_vcn_dec_sched;
                        break;
                case AMDGPU_HW_IP_VCN_ENC:
                        scheds = adev->vcn.vcn_enc_sched;
                        num_scheds = adev->vcn.num_vcn_enc_sched;
                        break;
                case AMDGPU_HW_IP_VCN_JPEG:
                        scheds = adev->jpeg.jpeg_sched;
                        num_scheds = adev->jpeg.num_jpeg_sched;
                        break;
                }

                for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
                        r = drm_sched_entity_init(&ctx->entities[i][j].entity,
                                                  priority, scheds,
                                                  num_scheds, &ctx->guilty);
                        /* Check every entity, not only the last one. */
                        if (r)
                                goto error_cleanup_entities;
                }
        }

        return 0;

error_cleanup_entities:
        for (i = 0; i < num_entities; ++i)
                drm_sched_entity_destroy(&ctx->entities[0][i].entity);
        kfree(ctx->entities[0]);

error_free_fences:
        kfree(ctx->fences);
        ctx->fences = NULL;
        return r;
}

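/*
 * kref release callback: drop all cached fences and free the fence and
 * entity arrays. The drm_sched entities must already have been destroyed
 * by the caller (see amdgpu_ctx_do_release() and amdgpu_ctx_mgr_fini()).
 */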
static void amdgpu_ctx_fini(struct kref *ref)
{
        struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        struct amdgpu_device *adev = ctx->adev;
        unsigned i, j;

        if (!adev)
                return;

        for (i = 0; i < num_entities; ++i)
                for (j = 0; j < amdgpu_sched_jobs; ++j)
                        dma_fence_put(ctx->entities[0][i].fences[j]);
        kfree(ctx->fences);
        kfree(ctx->entities[0]);

        mutex_destroy(&ctx->lock);

        kfree(ctx);
}

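/*
 * Translate a userspace-supplied (hw_ip, instance, ring) triple into the
 * matching scheduler entity of this context.
 */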
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
                          u32 ring, struct drm_sched_entity **entity)
{
        if (hw_ip >= AMDGPU_HW_IP_NUM) {
                DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
                return -EINVAL;
        }

        /* Right now all IPs have only one instance - multiple rings. */
        if (instance != 0) {
                DRM_DEBUG("invalid ip instance: %d\n", instance);
                return -EINVAL;
        }

        if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
                DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
                return -EINVAL;
        }

        *entity = &ctx->entities[hw_ip][ring].entity;
        return 0;
}

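/*
 * Create a context and publish it in the file-private IDR; the returned
 * IDR handle is the ctx_id userspace uses to refer to it.
 */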
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
                            struct drm_file *filp,
                            enum drm_sched_priority priority,
                            uint32_t *id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;
        int r;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        mutex_lock(&mgr->lock);
        r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
        if (r < 0) {
                mutex_unlock(&mgr->lock);
                kfree(ctx);
                return r;
        }

        *id = (uint32_t)r;
        r = amdgpu_ctx_init(adev, priority, filp, ctx);
        if (r) {
                idr_remove(&mgr->ctx_handles, *id);
                *id = 0;
                kfree(ctx);
        }
        mutex_unlock(&mgr->lock);
        return r;
}

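/*
 * Release callback for contexts dropped through the ioctl path: destroy
 * all scheduler entities first, then tear the context down.
 */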
static void amdgpu_ctx_do_release(struct kref *ref)
{
        struct amdgpu_ctx *ctx;
        unsigned num_entities;
        u32 i;

        ctx = container_of(ref, struct amdgpu_ctx, refcount);

        num_entities = amdgpu_ctx_total_num_entities();
        for (i = 0; i < num_entities; i++)
                drm_sched_entity_destroy(&ctx->entities[0][i].entity);

        amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;

        mutex_lock(&mgr->lock);
        ctx = idr_remove(&mgr->ctx_handles, id);
        if (ctx)
                kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        mutex_unlock(&mgr->lock);
        return ctx ? 0 : -EINVAL;
}

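/*
 * AMDGPU_CTX_OP_QUERY_STATE: report whether a GPU reset occurred since
 * the previous query of this context.
 */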
static int amdgpu_ctx_query(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv, uint32_t id,
                            union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        unsigned reset_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        /* TODO: these two are always zero */
        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        /* determine if a GPU reset has occurred since the last call */
        reset_counter = atomic_read(&adev->gpu_reset_counter);
        /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
        if (ctx->reset_counter_query == reset_counter)
                out->state.reset_status = AMDGPU_CTX_NO_RESET;
        else
                out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
        ctx->reset_counter_query = reset_counter;

        mutex_unlock(&mgr->lock);
        return 0;
}

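/*
 * AMDGPU_CTX_OP_QUERY_STATE2: report resets, VRAM loss, guilt and RAS
 * error-counter changes as individual flags.
 */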
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
        struct amdgpu_fpriv *fpriv, uint32_t id,
        union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        unsigned long ras_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

        if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

        if (atomic_read(&ctx->guilty))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

        /* query ue count */
        ras_counter = amdgpu_ras_query_error_count(adev, false);
        /* ras counter is monotonically increasing */
        if (ras_counter != ctx->ras_counter_ue) {
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
                ctx->ras_counter_ue = ras_counter;
        }

        /* query ce count */
        ras_counter = amdgpu_ras_query_error_count(adev, true);
        if (ras_counter != ctx->ras_counter_ce) {
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
                ctx->ras_counter_ce = ras_counter;
        }

        mutex_unlock(&mgr->lock);
        return 0;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp)
{
        int r;
        uint32_t id;
        enum drm_sched_priority priority;

        union drm_amdgpu_ctx *args = data;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        r = 0;
        id = args->in.ctx_id;
        priority = amdgpu_to_sched_priority(args->in.priority);

        /* For backwards compatibility reasons, we need to accept
         * ioctls with garbage in the priority field */
        if (priority == DRM_SCHED_PRIORITY_INVALID)
                priority = DRM_SCHED_PRIORITY_NORMAL;

        switch (args->in.op) {
        case AMDGPU_CTX_OP_ALLOC_CTX:
                r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
                args->out.alloc.ctx_id = id;
                break;
        case AMDGPU_CTX_OP_FREE_CTX:
                r = amdgpu_ctx_free(fpriv, id);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE:
                r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE2:
                r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
                break;
        default:
                return -EINVAL;
        }

        return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;

        if (!fpriv)
                return NULL;

        mgr = &fpriv->ctx_mgr;

        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (ctx)
                kref_get(&ctx->refcount);
        mutex_unlock(&mgr->lock);
        return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
        if (ctx == NULL)
                return -EINVAL;

        kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        return 0;
}

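/*
 * Store a fence in the entity's fixed-size ring buffer and return its
 * sequence number. Slot selection via seq & (amdgpu_sched_jobs - 1)
 * relies on amdgpu_sched_jobs being a power of two; the fence being
 * evicted from the slot must already have signaled (hence the BUG_ON).
 */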
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
                          struct drm_sched_entity *entity,
                          struct dma_fence *fence, uint64_t* handle)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        uint64_t seq = centity->sequence;
        struct dma_fence *other = NULL;
        unsigned idx = 0;

        idx = seq & (amdgpu_sched_jobs - 1);
        other = centity->fences[idx];
        if (other)
                BUG_ON(!dma_fence_is_signaled(other));

        dma_fence_get(fence);

        spin_lock(&ctx->ring_lock);
        centity->fences[idx] = fence;
        centity->sequence++;
        spin_unlock(&ctx->ring_lock);

        dma_fence_put(other);
        if (handle)
                *handle = seq;
}

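/*
 * Look up a fence by sequence number: ~0ull means the most recent one,
 * a sequence not emitted yet is -EINVAL, and one that already dropped
 * out of the ring buffer returns NULL (long since signaled).
 */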
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                       struct drm_sched_entity *entity,
                                       uint64_t seq)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        struct dma_fence *fence;

        spin_lock(&ctx->ring_lock);

        if (seq == ~0ull)
                seq = centity->sequence - 1;

        if (seq >= centity->sequence) {
                spin_unlock(&ctx->ring_lock);
                return ERR_PTR(-EINVAL);
        }

        if (seq + amdgpu_sched_jobs < centity->sequence) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }

        fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
        spin_unlock(&ctx->ring_lock);

        return fence;
}

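/*
 * Apply a priority override to all entities of the context;
 * DRM_SCHED_PRIORITY_UNSET falls back to the creation-time priority.
 */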
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
                                  enum drm_sched_priority priority)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        enum drm_sched_priority ctx_prio;
        unsigned i;

        ctx->override_priority = priority;

        ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
                        ctx->init_priority : ctx->override_priority;

        for (i = 0; i < num_entities; i++) {
                struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

                drm_sched_entity_set_priority(entity, ctx_prio);
        }
}

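/*
 * Throttling: wait for the fence in the slot the next submission would
 * reuse, so no more than amdgpu_sched_jobs fences per entity are ever
 * outstanding.
 */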
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
                               struct drm_sched_entity *entity)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        struct dma_fence *other;
        unsigned idx;
        long r;

        spin_lock(&ctx->ring_lock);
        idx = centity->sequence & (amdgpu_sched_jobs - 1);
        other = dma_fence_get(centity->fences[idx]);
        spin_unlock(&ctx->ring_lock);

        if (!other)
                return 0;

        r = dma_fence_wait(other, true);
        if (r < 0 && r != -ERESTARTSYS)
                DRM_ERROR("Error (%ld) waiting for fence!\n", r);

        dma_fence_put(other);
        return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
}

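/*
 * Give every entity of every context up to @timeout to flush its queued
 * jobs to the scheduler before the entities are finalized.
 */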
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;

        idp = &mgr->ctx_handles;

        mutex_lock(&mgr->lock);
        idr_for_each_entry(idp, ctx, id) {
                for (i = 0; i < num_entities; i++) {
                        struct drm_sched_entity *entity;

                        entity = &ctx->entities[0][i].entity;
                        timeout = drm_sched_entity_flush(entity, timeout);
                }
        }
        mutex_unlock(&mgr->lock);
        return timeout;
}

void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_read(&ctx->refcount) != 1) {
                        DRM_ERROR("ctx %p is still alive\n", ctx);
                        continue;
                }

                for (i = 0; i < num_entities; i++)
                        drm_sched_entity_fini(&ctx->entities[0][i].entity);
        }
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id;

        amdgpu_ctx_mgr_entity_fini(mgr);

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
                        DRM_ERROR("ctx %p is still alive\n", ctx);
        }

        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->lock);
}

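/*
 * Collect the per-IP scheduler arrays that amdgpu_ctx_init() hands to
 * drm_sched_entity_init(), skipping harvested VCN/JPEG instances.
 */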
void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
                adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
                adev->gfx.num_gfx_sched++;
        }

        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
                adev->gfx.num_compute_sched++;
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
                adev->sdma.num_sdma_sched++;
        }

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
                        &adev->vcn.inst[i].ring_dec.sched;
        }

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                for (j = 0; j < adev->vcn.num_enc_rings; ++j)
                        adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
                                &adev->vcn.inst[i].ring_enc[j].sched;
        }

        for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
                if (adev->jpeg.harvest_config & (1 << i))
                        continue;
                adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
                        &adev->jpeg.inst[i].ring_dec.sched;
        }
}