linux.git: drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <[email protected]>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define VCE_V2_0_FW_SIZE	(256 * 1024)
#define VCE_V2_0_STACK_SIZE	(64 * 1024)
#define VCE_V2_0_DATA_SIZE	(23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

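/**
 * vce_v2_0_lmi_clean - wait for the LMI status to settle
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_LMI_STATUS (for up to ~10 seconds) until one of the expected
 * status bits is set. Returns 0 on success, -ETIMEDOUT otherwise.
 */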
static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_LMI_STATUS);

			if (status & 0x337f)
				return 0;
			mdelay(10);
		}
	}

	return -ETIMEDOUT;
}

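/**
 * vce_v2_0_firmware_loaded - wait for the VCPU to report the firmware as loaded
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_STATUS for the firmware-loaded flag, soft-resetting the ECPU
 * between attempts. Returns 0 on success, -ETIMEDOUT otherwise.
 */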
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(mmVCE_SOFT_RESET,
			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmVCE_SOFT_RESET, 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

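/* Override the clock gating controls so the VCE clocks stay on */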
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

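/* Program the initial (static) clock gating configuration */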
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32(mmVCE_CLOCK_GATING_A);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	tmp |= 0x40000;
	WREG32(mmVCE_CLOCK_GATING_A, tmp);

	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp |= 0x10;
	tmp &= ~0x100000;
	WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

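/**
 * vce_v2_0_mc_resume - program the memory controller interface
 *
 * @adev: amdgpu_device pointer
 *
 * Tell the VCE where the firmware, stack and data regions live in its
 * address space and re-enable the system trap interrupt.
 */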
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr = adev->vce.gpu_addr;
	uint32_t size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	addr += AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V2_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = VCE_V2_0_STACK_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = VCE_V2_0_DATA_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

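/* The VCE block is idle once the SRBM no longer reports it as busy */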
static bool vce_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (vce_v2_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	/* set BUSY flag */
	WREG32_P(mmVCE_STATUS, 1, ~1);

	vce_v2_0_init_cg(adev);
	vce_v2_0_disable_cg(adev);

	vce_v2_0_mc_resume(adev);

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
	mdelay(100);
	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);

	r = vce_v2_0_firmware_loaded(adev);

	/* clear BUSY flag */
	WREG32_P(mmVCE_STATUS, 0, ~1);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}

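/**
 * vce_v2_0_stop - stop the VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the engine to go idle, stall the register and memory
 * interfaces and put the VCPU into reset.
 */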
static int vce_v2_0_stop(struct amdgpu_device *adev)
{
	int i, j;
	int status;

	if (vce_v2_0_lmi_clean(adev)) {
		DRM_INFO("VCE is not idle\n");
		return 0;
	}
/*
	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmVCE_FW_REG_STATUS);
			if (!(status & 1))
				break;
			mdelay(1);
		}
		break;
	}
*/
	if (vce_v2_0_wait_for_idle(adev)) {
		DRM_INFO("VCE is busy, can't set clock gating\n");
		return 0;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmVCE_LMI_STATUS);
			if (status & 0x240)
				break;
			mdelay(1);
		}
		break;
	}

	WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);

	WREG32(mmVCE_STATUS, 0);

	return 0;
}

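/* Software clock gating: gate or ungate the VCE clocks explicitly */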
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
	u32 tmp;

	if (gated) {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp &= ~0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
	} else {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe7;
		tmp &= ~0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0x1fe000;
		tmp &= ~0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp |= 0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
	}
}

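/* Dynamic clock gating: let the hardware gate most blocks on its own */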
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
	u32 orig, tmp;

	/* LMI_MC/LMI_UMC always set in dynamic,
	 * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
	 */
	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp &= ~0x00060006;

	/* Exception: the ECPU, IH, SEM and SYS blocks need to be gated on/off by SW */
	if (gated) {
		tmp |= 0xe10000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);
	} else {
		tmp |= 0xe1;
		tmp &= ~0xe10000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);
	}

	orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0x1fe000;
	tmp &= ~0xff000000;
	if (tmp != orig)
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
	tmp &= ~0x3fc;
	if (tmp != orig)
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

	/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
	WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

	if (gated)
		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}

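/**
 * vce_v2_0_enable_mgcg - configure medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable clock gating
 * @sw_cg: use software instead of dynamic clock gating
 *
 * Gating is only applied when MGCG is supported in adev->cg_flags;
 * otherwise the clocks are forced on.
 */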
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
				 bool sw_cg)
{
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
		if (sw_cg)
			vce_v2_0_set_sw_cg(adev, true);
		else
			vce_v2_0_set_dyn_cg(adev, true);
	} else {
		vce_v2_0_disable_cg(adev);

		if (sw_cg)
			vce_v2_0_set_sw_cg(adev, false);
		else
			vce_v2_0_set_dyn_cg(adev, false);
	}
}

static int vce_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.num_rings = 2;

	vce_v2_0_set_ring_funcs(adev);
	vce_v2_0_set_irq_funcs(adev);

	return 0;
}

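/*
 * Register the VCE interrupt source (src id 167), reserve the
 * firmware/stack/data memory and initialize both VCE rings.
 */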
static int vce_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
		VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512,
				     &adev->vce.irq, 0);
		if (r)
			return r;
	}

	return r;
}

static int vce_v2_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}

static int vce_v2_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
	vce_v2_0_enable_mgcg(adev, true, false);
	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v2_0_hw_fini(void *handle)
{
	return 0;
}

static int vce_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v2_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}

static int vce_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v2_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

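/* Assert the VCE soft reset in the SRBM, then restart the block */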
static int vce_v2_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
	mdelay(5);

	return vce_v2_0_start(adev);
}

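/* Enable or disable the VCE trap interrupt based on the requested state */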
static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");
	switch (entry->src_data) {
	case 0:
	case 1:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int vce_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	bool sw_cg = false;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE) {
		gate = true;
		sw_cg = true;
	}

	vce_v2_0_enable_mgcg(adev, gate, sw_cg);

	return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		return vce_v2_0_stop(adev);
	else
		return vce_v2_0_start(adev);
}

static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
	.name = "vce_v2_0",
	.early_init = vce_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v2_0_sw_init,
	.sw_fini = vce_v2_0_sw_fini,
	.hw_init = vce_v2_0_hw_init,
	.hw_fini = vce_v2_0_hw_fini,
	.suspend = vce_v2_0_suspend,
	.resume = vce_v2_0_resume,
	.is_idle = vce_v2_0_is_idle,
	.wait_for_idle = vce_v2_0_wait_for_idle,
	.soft_reset = vce_v2_0_soft_reset,
	.set_clockgating_state = vce_v2_0_set_clockgating_state,
	.set_powergating_state = vce_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.get_rptr = vce_v2_0_ring_get_rptr,
	.get_wptr = vce_v2_0_ring_get_wptr,
	.set_wptr = vce_v2_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
	.set = vce_v2_0_set_interrupt_state,
	.process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v2_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v2_0_ip_funcs,
};