// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
#include "jpeg_v5_0_1.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"

static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_powergating_state state);
static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring);

static int amdgpu_ih_srcid_jpeg[] = {
	VCN_5_0__SRCID__JPEG_DECODE,
	VCN_5_0__SRCID__JPEG1_DECODE,
	VCN_5_0__SRCID__JPEG2_DECODE,
	VCN_5_0__SRCID__JPEG3_DECODE,
	VCN_5_0__SRCID__JPEG4_DECODE,
	VCN_5_0__SRCID__JPEG5_DECODE,
	VCN_5_0__SRCID__JPEG6_DECODE,
	VCN_5_0__SRCID__JPEG7_DECODE,
	VCN_5_0__SRCID__JPEG8_DECODE,
	VCN_5_0__SRCID__JPEG9_DECODE,
};
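
/*
 * Translate a pipe index into the register offset of that pipe's JRBC core
 * block. Cores are spaced 0x40 registers apart but appear to live in two
 * separate apertures, so the subtracted rebase constant differs between the
 * first bank (which matches the JPEG 4.0.3 layout) and the later one.
 */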
static int jpeg_v5_0_1_core_reg_offset(u32 pipe)
{
	if (pipe <= AMDGPU_MAX_JPEG_RINGS_4_0_3)
		return ((0x40 * pipe) - 0xc80);
	else
		return ((0x40 * pipe) - 0x440);
}

/**
 * jpeg_v5_0_1_early_init - set function pointers
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 */
static int jpeg_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (!adev->jpeg.num_jpeg_inst || adev->jpeg.num_jpeg_inst > AMDGPU_MAX_JPEG_INSTANCES)
		return -ENOENT;

	adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
	jpeg_v5_0_1_set_dec_ring_funcs(adev);
	jpeg_v5_0_1_set_irq_funcs(adev);

	return 0;
}

/**
 * jpeg_v5_0_1_sw_init - sw init for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

	for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
		/* JPEG TRAP */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      amdgpu_ih_srcid_jpeg[j], &adev->jpeg.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			ring->use_doorbell = false;
			ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
			if (!amdgpu_sriov_vf(adev)) {
				ring->doorbell_index =
					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					1 + j + 11 * jpeg_inst;
			} else {
				if (j < 4)
					ring->doorbell_index =
						(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
						4 + j + 32 * jpeg_inst;
				else
					ring->doorbell_index =
						(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
						8 + j + 32 * jpeg_inst;
			}
			sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;

			adev->jpeg.internal.jpeg_pitch[j] =
				regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
			adev->jpeg.inst[i].external.jpeg_pitch[j] =
				SOC15_REG_OFFSET1(JPEG, jpeg_inst, regUVD_JRBC_SCRATCH0,
						  (j ? jpeg_v5_0_1_core_reg_offset(j) : 0));
		}
	}

	return 0;
}

/**
 * jpeg_v5_0_1_sw_fini - sw fini for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v5_0_1_hw_init - start and test JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware and test the decode rings
 */
static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

	if (amdgpu_sriov_vf(adev)) {
		/* jpeg_v5_0_1_start_sriov(adev); */
		for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
			for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
				ring = &adev->jpeg.inst[i].ring_dec[j];
				ring->wptr = 0;
				ring->wptr_old = 0;
				jpeg_v5_0_1_dec_ring_set_wptr(ring);
				ring->sched.ready = true;
			}
		}
		return 0;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);
		ring = adev->jpeg.inst[i].ring_dec;
		if (ring->use_doorbell)
			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 11 * jpeg_inst,
				adev->jpeg.inst[i].aid_id);
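
		/*
		 * Program each ring's doorbell offset into its VCN_JPEG_DB_CTRL
		 * register and set the enable bit before testing the ring.
		 */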
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			if (ring->use_doorbell)
				WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL,
						    (ring->pipe ? (ring->pipe - 0x15) : 0),
						    ring->doorbell_index <<
						    VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
						    VCN_JPEG_DB_CTRL__EN_MASK);
			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * jpeg_v5_0_1_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret = 0;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
		ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);

	return ret;
}

/**
 * jpeg_v5_0_1_suspend - suspend JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = jpeg_v5_0_1_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v5_0_1_resume - resume JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v5_0_1_hw_init(ip_block);

	return r;
}
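
/*
 * The anti-hang mechanism below parks the JPEG block in a safe power-gated
 * state via UVD_JPEG_POWER_STATUS; it is released before the cores are
 * started and re-engaged when they are stopped.
 */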
static int jpeg_v5_0_1_disable_antihang(struct amdgpu_device *adev, int inst_idx)
{
	int jpeg_inst;

	jpeg_inst = GET_INST(JPEG, inst_idx);
	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
		 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
		 ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	return 0;
}

static int jpeg_v5_0_1_enable_antihang(struct amdgpu_device *adev, int inst_idx)
{
	int jpeg_inst;

	jpeg_inst = GET_INST(JPEG, inst_idx);
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
		 UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	return 0;
}

/**
 * jpeg_v5_0_1_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, j, jpeg_inst, r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

		/* disable antihang */
		r = jpeg_v5_0_1_disable_antihang(adev, i);
		if (r)
			return r;

		/* MJPEG global tiling registers */
		WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);

		/* enable JMI channel */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
			 ~UVD_JMI_CNTL__SOFT_RESET_MASK);
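
		/*
		 * Per-ring JRBC setup: unmask the ring's system interrupt, then
		 * point the ring-buffer controller at the ring's GPU address
		 * and arm it.
		 */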
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
			u32 reg, data, mask;

			ring = &adev->jpeg.inst[i].ring_dec[j];

			/* enable System Interrupt for JRBC */
			reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
			if (j < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
				data = JPEG_SYS_INT_EN__DJRBC0_MASK << j;
				mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j);
				WREG32_P(reg, data, mask);
			} else {
				data = JPEG_SYS_INT_EN__DJRBC0_MASK << (j + 12);
				mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (j + 12));
				WREG32_P(reg, data, mask);
			}
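
			/*
			 * Ring-buffer bring-up. The raw 0x1/0x2 values match
			 * the RB_NO_FETCH and RB_RPTR_WR_EN bits of
			 * UVD_JRBC_RB_CNTL on earlier JPEG generations: hold
			 * off fetch while the base address and pointers are
			 * set, then release it.
			 */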
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_LMI_JRBC_RB_VMID,
					    reg_offset, 0);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC_RB_CNTL,
					    reg_offset,
					    (0x00000001L | 0x00000002L));
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
					    reg_offset, lower_32_bits(ring->gpu_addr));
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
					    reg_offset, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC_RB_RPTR,
					    reg_offset, 0);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC_RB_WPTR,
					    reg_offset, 0);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC_RB_CNTL,
					    reg_offset, 0x00000002L);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC_RB_SIZE,
					    reg_offset, ring->ring_size / 4);
			ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
							 reg_offset);
		}
	}

	return 0;
}

/**
 * jpeg_v5_0_1_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the JPEG block
 */
static int jpeg_v5_0_1_stop(struct amdgpu_device *adev)
{
	int i, jpeg_inst, r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);
		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
			 UVD_JMI_CNTL__SOFT_RESET_MASK,
			 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

		/* enable antihang */
		r = jpeg_v5_0_1_enable_antihang(adev, i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * jpeg_v5_0_1_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v5_0_1_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_RPTR,
				   ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
}

/**
 * jpeg_v5_0_1_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v5_0_1_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_WPTR,
				   ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
}

/**
 * jpeg_v5_0_1_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
				    regUVD_JRBC_RB_WPTR,
				    (ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0),
				    lower_32_bits(ring->wptr));
	}
}

static bool jpeg_v5_0_1_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool ret = true;	/* start true so the AND below reports all-rings-idle */
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);

			ret &= ((RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, i),
				regUVD_JRBC_STATUS, reg_offset) &
				UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
				UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
		}
	}

	return ret;
}

static int jpeg_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j, ret;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);

			ret = SOC15_WAIT_ON_RREG_OFFSET(JPEG, GET_INST(JPEG, i),
							regUVD_JRBC_STATUS, reg_offset,
							UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
							UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if (!enable)
		return 0;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (!jpeg_v5_0_1_is_idle(adev))
			return -EBUSY;
	}

	return 0;
}

static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v5_0_1_stop(adev);
	else
		ret = jpeg_v5_0_1_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v5_0_1_set_interrupt_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int type,
					   enum amdgpu_interrupt_state state)
{
	return 0;
}
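
/*
 * JPEG interrupts are reported per AID: entry->node_id is translated to a
 * physical AID index and matched against each instance's aid_id to pick the
 * instance, then src_id selects which ring's fences to process.
 */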
static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 struct amdgpu_iv_entry *entry)
{
	u32 i, inst;

	i = node_id_to_phys_map[entry->node_id];
	DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n");

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst)
		if (adev->jpeg.inst[inst].aid_id == i)
			break;

	if (inst >= adev->jpeg.num_jpeg_inst) {
		dev_WARN_ONCE(adev->dev, 1,
			      "Interrupt received for unknown JPEG instance %d",
			      entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_5_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]);
		break;
	case VCN_5_0__SRCID__JPEG1_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]);
		break;
	case VCN_5_0__SRCID__JPEG2_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]);
		break;
	case VCN_5_0__SRCID__JPEG3_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]);
		break;
	case VCN_5_0__SRCID__JPEG4_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]);
		break;
	case VCN_5_0__SRCID__JPEG5_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]);
		break;
	case VCN_5_0__SRCID__JPEG6_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]);
		break;
	case VCN_5_0__SRCID__JPEG7_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]);
		break;
	case VCN_5_0__SRCID__JPEG8_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[8]);
		break;
	case VCN_5_0__SRCID__JPEG9_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[9]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			      entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
	.name = "jpeg_v5_0_1",
	.early_init = jpeg_v5_0_1_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v5_0_1_sw_init,
	.sw_fini = jpeg_v5_0_1_sw_fini,
	.hw_init = jpeg_v5_0_1_hw_init,
	.hw_fini = jpeg_v5_0_1_hw_fini,
	.suspend = jpeg_v5_0_1_suspend,
	.resume = jpeg_v5_0_1_resume,
	.is_idle = jpeg_v5_0_1_is_idle,
	.wait_for_idle = jpeg_v5_0_1_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v5_0_1_set_clockgating_state,
	.set_powergating_state = jpeg_v5_0_1_set_powergating_state,
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};
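
/*
 * The 5.0.1 decode ring reuses the JPEG 4.0.3 packet-emission helpers and
 * the common amdgpu_jpeg ring tests; only the rptr/wptr handling above is
 * version specific.
 */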
static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
	.get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
	.set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v5_0_1_dec_ring_emit_vm_flush */
		22 + 22 + /* jpeg_v5_0_1_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v5_0_1_dec_ring_emit_ib */
	.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
	.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v4_0_3_dec_ring_nop,
	.insert_start = jpeg_v4_0_3_dec_ring_insert_start,
	.insert_end = jpeg_v4_0_3_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i, j, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v5_0_1_dec_ring_vm_funcs;
			adev->jpeg.inst[i].ring_dec[j].me = i;
			adev->jpeg.inst[i].ring_dec[j].pipe = j;
		}
		jpeg_inst = GET_INST(JPEG, i);
		adev->jpeg.inst[i].aid_id =
			jpeg_inst / adev->jpeg.num_inst_per_aid;
	}
}

static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_irq_funcs = {
	.set = jpeg_v5_0_1_set_interrupt_state,
	.process = jpeg_v5_0_1_process_interrupt,
};

static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
		adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;

	adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 5,
	.minor = 0,
	.rev = 1,
	.funcs = &jpeg_v5_0_1_ip_funcs,
};