/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"
/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
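/* The packed value above encodes firmware version 1.130.16: major in bits
 * 31:24, minor in bits 23:16 and revision in bits 15:8 -- the same packing
 * that adev->uvd.fw_version is compared against in uvd_v6_0_enc_support()
 * below.
 */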
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);
/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
	return ((adev->asic_type >= CHIP_POLARIS10) &&
		(adev->asic_type <= CHIP_VEGAM) &&
		(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}
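/* Note: a fw_version of 0 means the firmware has not been loaded yet (this
 * helper is already called from early_init, before amdgpu_uvd_sw_init()
 * fetches the microcode), so encode support is assumed until the real
 * version is known.
 */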
/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}
/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}
/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}
/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
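/* The test above only submits an END command and polls the read pointer:
 * any movement of rptr proves the engine fetched and consumed the command,
 * which is enough to know that ring processing is alive.
 */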
/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
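/* Each command in the message IB appears to be a size/type header followed
 * by its payload: 0x00000018/0x00000001 is a 0x18-byte session-info command
 * and 0x00000008/0x08000001 the initialize op.  The destroy message below
 * is identical except that it ends with op 0x08000002 (close session).
 */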
/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
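/* The 128KB VRAM buffer serves as the session area both messages point at.
 * dma_fence_wait_timeout() returns the remaining jiffies (> 0) when the
 * fence signaled in time, hence the remap of positive values to 0 (success).
 */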
static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}
static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_entity_init(adev);

	return r;
}
static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}
/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);
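	/* All three semaphore timeout controls above are programmed to what
	 * looks like the maximum 20-bit count (0xFFFFF), presumably to give
	 * semaphore waits the longest possible grace period before the
	 * timeout-status bits are cleared below.
	 */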
	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}
/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	return 0;
}
static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}
/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}
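/* The three consecutive VCPU cache windows above cover, in order, the
 * firmware image, the decoder heap, and the stack plus per-session state;
 * the OFFSETn registers apparently take their offsets in 8-byte units,
 * hence the >> 3.
 */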
#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
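	/* Boot handshake: the firmware sets bit 1 (VCPU report) of UVD_STATUS
	 * once the VCPU is up.  Each of the up-to-10 attempts above polls for
	 * roughly a second and, on failure, pulses VCPU_SOFT_RESET before
	 * trying again.
	 */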
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);
	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.inst->ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst->ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}
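/* Note the unit difference between the two ring families: the decode ring's
 * size is implied by the RB_BUFSZ field (log2 of the size in bytes) in
 * UVD_RBC_RB_CNTL, while the enc rings program RB_SIZE/RB_SIZE2 explicitly
 * in dwords, hence the ring_size / 4.
 */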
/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}
/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}
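/* The GPCOM_VCPU_CMD codes used above and in the helpers further down
 * appear to be: 0 = fence write (store CONTEXT_ID at the DATA1:DATA0
 * address), 2 = trap (raise an interrupt), 0x8 = register write,
 * 0xC = register wait and 0xE = memory wait -- compare
 * uvd_v6_0_ring_emit_wreg(), _emit_vm_flush() and _emit_pipeline_sync().
 */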
/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}
/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}
/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}
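/* After amdgpu_gmc_emit_flush_gpu_tlb() kicks off the invalidation, the
 * register wait (cmd 0xC) above polls VM_INVALIDATE_REQUEST, masked by
 * GP_SCRATCH8 = 1 << vmid, until this VM's request bit reads back as the
 * expected value 0 -- i.e. until the TLB flush has actually completed.
 */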
static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}
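/* Pipeline sync stalls the engine with a memory wait (cmd 0xE): the fence
 * value at fence_drv.gpu_addr is polled, masked with 0xffffffff, against
 * the sync_seq stored in GP_SCRATCH9 -- presumably until the last emitted
 * fence has signaled, so that following commands see prior work completed.
 */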
static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}
static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}
static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}
#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst->srbm_soft_reset = 0;
		return false;
	}
}
static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}
static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}
static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;
	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst->ring);
		break;
	case 119:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}
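/* The src_ids handled above mirror what sw_init registered:
 * VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE (124) for the decode ring and
 * VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP (119) plus the following id (120)
 * for the two enc rings.
 */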
static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK |
		UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK |
			UVD_CGC_GATE__JPEG2_MASK);
		/* only in pg enabled, we can gate clock to vcpu */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
/*		uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable Sw gating */
		uvd_v6_0_enable_clock_gating(adev, false);

		uvd_v6_0_set_sw_clock_gating(adev);
	}

	return 0;
}
static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}
static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}
static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};
static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}
const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};