/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
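
/*
 * UVD 4.2 is the video decode block on the CIK-generation parts
 * (Bonaire, Kaveri, Kabini, Hawaii, Mullins); this file provides its
 * IP-block, ring and interrupt callbacks for the amdgpu driver.
 */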

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
				enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode);

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	adev->uvd.num_uvd_inst = 1;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	uvd_v4_2_enable_mgcg(adev, true);
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}
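
	/*
	 * Program generous semaphore wait/signal timeouts (0xFFFFF cycles)
	 * so a stuck semaphore cannot hang the ring forever, then clear
	 * any stale timeout status and (presumably) enable the semaphore
	 * unit via mmUVD_SEMA_CNTL.
	 */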
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v4_2_stop(adev);

	return 0;
}

static int uvd_v4_2_prepare_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_uvd_prepare_suspend(adev);
}

static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 * - cancel the delayed idle work
	 * - enable powergating
	 * - enable clockgating
	 * - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v4_2_hw_init(adev);
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz;
	int i, j, r;
	u32 tmp;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* set uvd busy */
	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));

	uvd_v4_2_set_dcm(adev, true);
	WREG32(mmUVD_CGC_GATE, 0);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
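
	/*
	 * 0x203108 below is kept as a raw value: the LMI_CTRL bit fields
	 * (presumably write-burst and timeout defaults) are not named in
	 * the public 4.2 register headers.
	 */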
	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x203108);

	tmp = RREG32(mmUVD_MPC_CNTL);
	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	uvd_v4_2_mc_resume(adev);

	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));

	/* clear the busy bit set above */
	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
				   (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	uint32_t i, j;
	uint32_t status;

	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(1);
		}
		if (status & 2)
			break;
	}

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0xf)
				break;
			mdelay(1);
		}
		if (status & 0xf)
			break;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0x240)
				break;
			mdelay(1);
		}
		if (status & 0x240)
			break;
	}
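
	/*
	 * 0x3D49 is a raw register offset with no symbolic name in the
	 * public 4.2 headers; presumably an LMI/arbiter control bit that
	 * has to be cleared before the VCPU clock is gated below.
	 */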
	WREG32_P(0x3D49, 0, ~(1 << 2));

	/* disable VCPU clock */
	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32(mmUVD_STATUS, 0);

	uvd_v4_2_set_dcm(adev, false);
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
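
	/*
	 * Two GPCOM command groups follow. The first (command 0) appears
	 * to make the VCPU write the sequence number from mmUVD_CONTEXT_ID
	 * to the fence address in DATA0/DATA1; the second (command 2)
	 * raises the trap interrupt. The command encoding is not in the
	 * public headers and mirrors the radeon UVD fence code.
	 */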
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job associated with the indirect buffer
 * @ib: indirect buffer to execute
 * @flags: flags associated with the indirect buffer
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
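
	/*
	 * The remaining cache windows follow the firmware image back to
	 * back: heap, then stack plus one session area per handle. All
	 * offsets and sizes are programmed in 8-byte units, hence the
	 * ">> 3" scaling.
	 */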
	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
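
/*
 * Medium-grain clock gating: the low 0xfff bits of ixUVD_CGC_MEM_CTRL
 * presumably gate the individual UVD memories, while
 * UVD_CGC_CTRL__DYN_CLOCK_MODE switches the core clocks to
 * hardware-managed (dynamic) gating.
 */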
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}
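
/*
 * uvd_v4_2_set_dcm - toggle dynamic clock mode
 *
 * The 0x7ffff800 mask presumably covers the per-block soft-gate enables
 * in UVD_CGC_CTRL that are unnamed in the public headers: cleared in
 * sw_mode (with the CGC_CTRL2 ramp enables set), set otherwise.
 */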
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}
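
/*
 * UVD 4.2 has a single trap interrupt that is enabled from
 * uvd_v4_2_start(), so there is no per-type masking to do here.
 */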
static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);

	return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
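
	/*
	 * The PGFSM writes below drive the power-gating state machine
	 * directly and are only used when dpm is not managing UVD;
	 * CURRENT_PG_STATUS is checked first so the FSM is not kicked
	 * when the block is already in the requested state.
	 */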
	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(20);
			}
		}
		return 0;
	} else {
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(30);
			}
		}
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.prepare_suspend = uvd_v4_2_prepare_suspend,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v4_2_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};