/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "jpeg_v4_0_5.h"
#include "mmsch_v4_0.h"

#include "vcn/vcn_4_0_5_offset.h"
#include "vcn/vcn_4_0_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
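
/* map the legacy mm* DPG LMA register names onto their 4.0.5 reg* equivalents */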
#define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX
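
/* internal register offsets, used when registers are written indirectly
 * (through the DPG interface or ring packets) rather than via MMIO
 */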
#define regUVD_JPEG_PITCH_INTERNAL_OFFSET		0x401f
#define regJPEG_DEC_GFX10_ADDR_CONFIG_INTERNAL_OFFSET	0x4026
#define regJPEG_SYS_INT_EN_INTERNAL_OFFSET		0x4141
#define regJPEG_CGC_CTRL_INTERNAL_OFFSET		0x4161
#define regJPEG_CGC_GATE_INTERNAL_OFFSET		0x4160
#define regUVD_NO_OP_INTERNAL_OFFSET			0x0029

static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring);
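
/* IH client IDs, indexed by JPEG instance */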
static int amdgpu_ih_clientid_jpeg[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * jpeg_v4_0_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int jpeg_v4_0_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
	case IP_VERSION(4, 0, 5):
		adev->jpeg.num_jpeg_inst = 1;
		break;
	case IP_VERSION(4, 0, 6):
		adev->jpeg.num_jpeg_inst = 2;
		break;
	default:
		DRM_DEV_ERROR(adev->dev,
			      "Failed to init vcn ip block(UVD_HWIP:0x%x)\n",
			      amdgpu_ip_version(adev, UVD_HWIP, 0));
		return -EINVAL;
	}

	adev->jpeg.num_jpeg_rings = 1;

	jpeg_v4_0_5_set_dec_ring_funcs(adev);
	jpeg_v4_0_5_set_irq_funcs(adev);

	return 0;
}

/**
 * jpeg_v4_0_5_sw_init - sw init for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int jpeg_v4_0_5_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		/* JPEG TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
				VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq);
		if (r)
			return r;

		/* JPEG DJPEG POISON EVENT */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
				VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
		if (r)
			return r;

		/* JPEG EJPEG POISON EVENT */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
				VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
		if (r)
			return r;
	}

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = adev->jpeg.inst[i].ring_dec;
		ring->use_doorbell = true;
		ring->vm_hub = AMDGPU_MMHUB0(0);
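		/* 32-bit doorbell slot: VCN doorbell pair base * 2 + 1, stride of 8 per instance */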
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
		sprintf(ring->name, "jpeg_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
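
		/* the common JPEG ring test uses the pitch register as a scratch register */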
		adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
		adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_PITCH);
	}

	return 0;
}

/**
 * jpeg_v4_0_5_sw_fini - sw fini for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v4_0_5_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v4_0_5_hw_init - start and test JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int jpeg_v4_0_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r;

	/* TODO: Enable ring test with DPG support */
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
		DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully under DPG Mode");
		return 0;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = adev->jpeg.inst[i].ring_dec;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	DRM_INFO("JPEG decode initialized successfully under SPG Mode\n");

	return 0;
}

/**
 * jpeg_v4_0_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v4_0_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		if (!amdgpu_sriov_vf(adev)) {
			if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
			    RREG32_SOC15(JPEG, i, regUVD_JRBC_STATUS))
				jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
		}
	}

	return 0;
}

/**
 * jpeg_v4_0_5_suspend - suspend JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v4_0_5_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v4_0_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v4_0_5_resume - resume JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v4_0_5_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v4_0_5_hw_init(adev);

	return r;
}

static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK);
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE, data);
}

static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK;
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE, data);
}

static void jpeg_engine_4_0_5_dpg_clock_gating_mode(struct amdgpu_device *adev,
				int inst_idx, uint8_t indirect)
{
	uint32_t data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_CGC_CTRL_INTERNAL_OFFSET, data, indirect);

	data = 0;
	WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_CGC_GATE_INTERNAL_OFFSET,
				   data, indirect);
}

static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		WREG32(SOC15_REG_OFFSET(JPEG, inst, regUVD_IPX_DLDO_CONFIG),
			1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, inst, regUVD_IPX_DLDO_STATUS,
			0, UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	return 0;
}

static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS),
		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		WREG32(SOC15_REG_OFFSET(JPEG, inst, regUVD_IPX_DLDO_CONFIG),
			2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	return 0;
}

/**
 * jpeg_v4_0_5_start_dpg_mode - Jpeg start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start JPEG block with dpg mode
 */
static void jpeg_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	struct amdgpu_ring *ring = adev->jpeg.inst[inst_idx].ring_dec;
	uint32_t reg_data = 0;

	/* enable anti hang mechanism */
	reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
	reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
	reg_data |= 0x1;
	WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		WREG32(SOC15_REG_OFFSET(JPEG, inst_idx, regUVD_IPX_DLDO_CONFIG),
			2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, inst_idx, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
	reg_data |= UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
	WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
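
	/* start indirect writes at the beginning of the DPG scratch (SRAM) buffer */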
	if (indirect)
		adev->jpeg.inst[inst_idx].dpg_sram_curr_addr =
			(uint32_t *)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr;

	jpeg_engine_4_0_5_dpg_clock_gating_mode(adev, inst_idx, indirect);

	/* MJPEG global tiling registers */
	WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_DEC_GFX10_ADDR_CONFIG_INTERNAL_OFFSET,
		adev->gfx.config.gb_addr_config, indirect);
	/* enable System Interrupt for JRBC */
	WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_SYS_INT_EN_INTERNAL_OFFSET,
		JPEG_SYS_INT_EN__DJRBC_MASK, indirect);

	/* add nop to workaround PSP size check */
	WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regUVD_NO_OP_INTERNAL_OFFSET, 0, indirect);

	if (indirect)
		amdgpu_jpeg_psp_update_sram(adev, inst_idx, 0);
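
	/* program the JRBC ring buffer: 0x1 | 0x2 holds fetch off (RB_NO_FETCH)
	 * with rptr writes enabled (RB_RPTR_WR_EN) while the base address and
	 * pointers are set, then writing 0x2 alone releases the fetch hold
	 */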
	WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v4_0_5_stop_dpg_mode - Jpeg stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop JPEG block with dpg mode
 */
static void jpeg_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t reg_data = 0;

	reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
	reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
	WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
}

/**
 * jpeg_v4_0_5_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r, i;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, true);

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = adev->jpeg.inst[i].ring_dec;
		/* doorbell programming is done for every playback */
		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);

		WREG32_SOC15(VCN, i, regVCN_JPEG_DB_CTRL,
			ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
			VCN_JPEG_DB_CTRL__EN_MASK);

		if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
			jpeg_v4_0_5_start_dpg_mode(adev, i, adev->jpeg.indirect_sram);
			continue;
		}

		/* disable power gating */
		r = jpeg_v4_0_5_disable_static_power_gating(adev, i);
		if (r)
			return r;

		/* JPEG disable CGC */
		jpeg_v4_0_5_disable_clock_gating(adev, i);

		/* MJPEG global tiling registers */
		WREG32_SOC15(JPEG, i, regJPEG_DEC_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable JMI channel */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), 0,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		/* enable System Interrupt for JRBC */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, regJPEG_SYS_INT_EN),
			JPEG_SYS_INT_EN__DJRBC_MASK,
			~JPEG_SYS_INT_EN__DJRBC_MASK);
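
		/* program the JRBC ring buffer, then release the fetch hold (see
		 * jpeg_v4_0_5_start_dpg_mode for the RB_CNTL bit meanings)
		 */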
		WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_VMID, 0);
		WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
		WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_RPTR, 0);
		WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_WPTR, 0);
		WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_CNTL, 0x00000002L);
		WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
		ring->wptr = RREG32_SOC15(JPEG, i, regUVD_JRBC_RB_WPTR);
	}

	return 0;
}

/**
 * jpeg_v4_0_5_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v4_0_5_stop(struct amdgpu_device *adev)
{
	int r, i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
			jpeg_v4_0_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL),
			UVD_JMI_CNTL__SOFT_RESET_MASK,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		jpeg_v4_0_5_enable_clock_gating(adev, i);

		/* enable power gating */
		r = jpeg_v4_0_5_enable_static_power_gating(adev, i);
		if (r)
			return r;
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, false);

	return 0;
}

/**
 * jpeg_v4_0_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v4_0_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, ring->me, regUVD_JRBC_RB_RPTR);
}

/**
 * jpeg_v4_0_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v4_0_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(JPEG, ring->me, regUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v4_0_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, ring->me, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static bool jpeg_v4_0_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ret &= (((RREG32_SOC15(JPEG, i, regUVD_JRBC_STATUS) &
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
	}

	return ret;
}

static int jpeg_v4_0_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		return SOC15_WAIT_ON_RREG(JPEG, i, regUVD_JRBC_STATUS,
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
	}

	return 0;
}

static int jpeg_v4_0_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		if (enable) {
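			/* clocks can only be gated once the JRBC reports idle */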
			if (!jpeg_v4_0_5_is_idle(handle))
				return -EBUSY;

			jpeg_v4_0_5_enable_clock_gating(adev, i);
		} else {
			jpeg_v4_0_5_disable_clock_gating(adev, i);
		}
	}

	return 0;
}

static int jpeg_v4_0_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev)) {
		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v4_0_5_stop(adev);
	else
		ret = jpeg_v4_0_5_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_4_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(adev->jpeg.inst[ip_instance].ring_dec);
		break;
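	/* DJPEG/EJPEG poison: hand off to the common JPEG poison handler */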
	case VCN_4_0__SRCID_DJPEG0_POISON:
	case VCN_4_0__SRCID_EJPEG0_POISON:
		amdgpu_jpeg_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
	.name = "jpeg_v4_0_5",
	.early_init = jpeg_v4_0_5_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v4_0_5_sw_init,
	.sw_fini = jpeg_v4_0_5_sw_fini,
	.hw_init = jpeg_v4_0_5_hw_init,
	.hw_fini = jpeg_v4_0_5_hw_fini,
	.suspend = jpeg_v4_0_5_suspend,
	.resume = jpeg_v4_0_5_resume,
	.is_idle = jpeg_v4_0_5_is_idle,
	.wait_for_idle = jpeg_v4_0_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v4_0_5_set_clockgating_state,
	.set_powergating_state = jpeg_v4_0_5_set_powergating_state,
};

static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
	.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
	.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v4_0_5_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v4_0_5_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v4_0_5_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
		adev->jpeg.inst[i].ring_dec->me = i;
		DRM_DEV_INFO(adev->dev, "JPEG%d decode is enabled in VM mode\n", i);
	}
}

static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
	.process = jpeg_v4_0_5_process_interrupt,
};

static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		adev->jpeg.inst[i].irq.num_types = 1;
		adev->jpeg.inst[i].irq.funcs = &jpeg_v4_0_5_irq_funcs;
	}
}

const struct amdgpu_ip_block_version jpeg_v4_0_5_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 4,
	.minor = 0,
	.rev = 5,
	.funcs = &jpeg_v4_0_5_ip_funcs,
};