/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);
/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}
/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}
/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}
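/*
 * The three helpers above map the ring abstraction straight onto MMIO:
 * the hardware advances mmUVD_RBC_RB_RPTR as it consumes packets, and
 * the driver publishes new work by copying ring->wptr out to
 * mmUVD_RBC_RB_WPTR.
 */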
static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}
static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}
static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	r = amdgpu_uvd_sw_fini(adev);

	return r;
}
/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	r = uvd_v5_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}
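	/*
	 * Every register write on the UVD ring below is a PACKET0(reg, 0)
	 * header followed by the value, so the 10 dwords allocated above
	 * cover exactly the five register/value pairs that follow.
	 */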
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}
/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v5_0_stop(adev);
	ring->ready = false;

	return 0;
}
static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;

	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

	r = amdgpu_uvd_suspend(adev);

	return r;
}
static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = uvd_v5_0_hw_init(adev);

	return r;
}
/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));
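	/*
	 * The UVD BO is carved into three consecutive VCPU cache windows:
	 * window 0 holds the firmware image, window 1 the decoder heap,
	 * and window 2 the stack plus one session block per handle. The
	 * OFFSETn registers appear to take the offset in 8-byte units,
	 * hence the >> 3 below.
	 */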
	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;
	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);
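	/*
	 * Poll UVD_STATUS for the VCPU-up bit (bit 1), waiting up to
	 * 100 x 10ms per attempt; on timeout, pulse the VCPU soft reset
	 * and retry, giving up after 10 attempts.
	 */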
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);
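	/*
	 * RB_NO_FETCH stays set while the ring registers are programmed
	 * below; it is cleared again as the last step so the RBC only
	 * starts fetching once base, rptr and wptr are consistent.
	 */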
	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}
/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}
/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to emit
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);
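	/*
	 * Command 0 above asks the VCPU to write the fence value to the
	 * given address; command 2 below is the trap that raises the
	 * interrupt handled in uvd_v5_0_process_interrupt().
	 */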
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}
/**
 * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}
/**
 * uvd_v5_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}
/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
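	/*
	 * The register now holds 0xCAFEDEAD; the packet submitted below
	 * overwrites it with 0xDEADBEEF, so polling for that value proves
	 * the ring is actually fetching and executing packets.
	 */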
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}
static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}
static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	/* the trap emitted by uvd_v5_0_ring_emit_fence() lands here */
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}
static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;
	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		/* only when pg is enabled can we gate the clock to the vcpu */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	/* clear all MODE fields so every sub-block uses dynamic gating */
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	static int curstate = -1;
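	/*
	 * Note that curstate is static, so it is shared by every amdgpu
	 * device in the system rather than being tracked per device.
	 */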
	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (curstate == state)
		return 0;

	curstate = state;
	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);

		/* enable HW gates because UVD is idle */
/*		uvd_v5_0_set_hw_clock_gating(adev); */
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);

		uvd_v5_0_set_sw_clock_gating(adev);
	}

	return 0;
}
static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
		return 0;
	} else {
		return uvd_v5_0_start(adev);
	}
}
static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v5_0_ring_emit_hdp_flush */
		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
		14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}
static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}
const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v5_0_ip_funcs,
};