/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
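
/*
 * The RBC read/write pointers are plain 32-bit MMIO registers, which is why
 * only lower_32_bits(ring->wptr) is committed above and why
 * .support_64bit_ptrs is false in the ring funcs table at the end of this
 * file.
 */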

static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}
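
/*
 * All ring writes in this file follow the PACKET0 convention: PACKET0(reg, n)
 * emits a type-0 packet header addressing register "reg", followed by n + 1
 * data dwords. Each PACKET0(reg, 0)/value pair above is therefore a single
 * register write executed by the UVD block when the ring is processed.
 * A minimal helper showing the pattern (an illustrative sketch, not part of
 * the original driver):
 */
static inline void uvd_v5_0_ring_emit_wreg_sketch(struct amdgpu_ring *ring,
						  uint32_t reg, uint32_t val)
{
	/* type-0 packet header: write one dword to "reg" */
	amdgpu_ring_write(ring, PACKET0(reg, 0));
	/* the data dword itself */
	amdgpu_ring_write(ring, val);
}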

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block and mark the ring as no longer ready
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v5_0_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
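
/*
 * Resulting VCPU cache layout (offsets are written in 8-byte units, hence
 * the ">> 3"): region 0 holds the firmware image, region 1 the decoder
 * heap, and region 2 the stack plus one session buffer per supported
 * handle.
 */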

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Set up and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}
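
/*
 * Note on the ring buffer setup above: RB_BUFSZ takes the log2 of the ring
 * size, e.g. a 4096-byte ring yields order_base_2(4096) == 12. RB_NO_FETCH
 * is kept set while the base address and pointers are programmed and is
 * cleared only as the very last step, which is what actually allows the
 * RBC to start fetching commands.
 */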

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address the fence value is written to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}
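
/*
 * In the fence sequence above, the sequence number is first latched into
 * UVD_CONTEXT_ID; the first GPCOM group (DATA0 = fence address low,
 * DATA1 = address high, CMD = 0) then asks the VCPU to write that value to
 * memory, and the second group with CMD = 2 raises the trap interrupt that
 * is handled in uvd_v5_0_process_interrupt(). This command encoding follows
 * the long-standing UVD convention also used by the radeon driver.
 */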

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
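
/*
 * The test above works like a scratch-register check: 0xCAFEDEAD is written
 * directly via MMIO, 0xDEADBEEF is written through the ring, and the CPU
 * polls until the new value appears, proving the VCPU actually fetched and
 * executed the packet.
 */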

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vmid: VM ID, unused on UVD
 * @ctx_switch: context-switch flag, unused on UVD
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
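
/*
 * Dispatching an IB on UVD thus takes only three register writes: the
 * 64-bit GPU address of the buffer and its length in dwords; the VCPU
 * fetches the command stream from there on its own.
 */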

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);

	return 0;
}

static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		/* we can gate the VCPU clock only when powergating is enabled */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}
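
/*
 * Each bit in UVD_CGC_GATE/UVD_SUVD_CGC_GATE permits clock gating for one
 * sub-block. REGS is deliberately kept ungated so register access keeps
 * working, and the VCPU clock may only be gated when UVD powergating is
 * supported, per the pg_flags check above.
 */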

static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
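
/*
 * REG_FIELD_SHIFT(reg, field) expands to reg##__##field##__SHIFT, so e.g.
 * (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY)) places the value 4
 * in the CLK_OFF_DELAY field, the open-coded equivalent of the
 * REG_SET_FIELD() helper used in uvd_v5_0_start().
 */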

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}
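
/*
 * ixUVD_CGC_MEM_CTRL is an indirect UVD context register, hence the
 * RREG32_UVD_CTX()/WREG32_UVD_CTX() accessors. The low twelve bits set or
 * cleared above are assumed to be per-memory gating enables; their exact
 * per-bit meaning is not documented in this file.
 */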

static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);

		/* enable HW gates because UVD is idle */
/*		uvd_v5_0_set_hw_clock_gating(adev); */
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);
	}

	uvd_v5_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE)
		uvd_v5_0_stop(adev);
	else
		ret = uvd_v5_0_start(adev);

	return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
	    CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v5_0_ring_emit_fence x1, no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v5_0_ip_funcs,
};
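
/*
 * This descriptor is what the SoC setup code registers during early init;
 * e.g. vi.c adds it via amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block)
 * on ASICs with a UVD 5.0 block.
 */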