/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
	return ((adev->asic_type >= CHIP_POLARIS10) &&
		(adev->asic_type <= CHIP_POLARIS12));
}
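
/* The HEVC encode rings only exist on the Polaris parts (POLARIS10,
 * POLARIS11 and POLARIS12 sit consecutively in the asic_type enum, which
 * is what the range check above relies on), so every ENC code path below
 * is guarded by this helper.
 */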

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}
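
/* Note the register split above: the decode ring is driven through the
 * RBC block (mmUVD_RBC_RB_*), while the two encode rings use the
 * mmUVD_RB_RPTR/WPTR and mmUVD_RB_RPTR2/WPTR2 pairs respectively.
 */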

/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);
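
	/* The ENC firmware should advance the read pointer as soon as it
	 * consumes the END token, so watching rptr move is enough to prove
	 * the ring is alive.
	 */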
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
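	/* Build the create message: each block is { size-in-bytes, opcode,
	 * payload... }. A 0x18-byte session-info block carries the handle
	 * and a scratch buffer address, a 0x14-byte task-info block
	 * follows, then the 8-byte "initialize" op. The remainder of the
	 * IB is zero padded.
	 */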
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @direct: submit the msg directly instead of through the scheduler
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				 bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;
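
	/* HW tests submit the close directly on the ring; the normal
	 * teardown path hands the job to the scheduler entity that was
	 * set up in uvd_v6_0_sw_init() instead.
	 */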
	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->uvd.entity_enc,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 *
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq);
			if (r)
				return r;
		}
	}
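
	/* Both ring types share the single UVD interrupt source registered
	 * above; the trap src id (124, or 119/120 for ENC) decides which
	 * ring's fences get processed in uvd_v6_0_process_interrupt().
	 */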

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		struct amd_sched_rq *rq;

		ring = &adev->uvd.ring_enc[0];
		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
		r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
					  rq, amdgpu_sched_jobs);
		if (r) {
			DRM_ERROR("Failed setting up UVD ENC run queue.\n");
			return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
			if (r)
				return r;
		}
	}

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);

		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}
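
	/* Before handing the ring to the world, emit a small init sequence
	 * that programs generous semaphore wait/signal timeouts, clears any
	 * stale timeout status and enables semaphores via mmUVD_SEMA_CNTL
	 * (the value 3 presumably sets both the wait and signal enable
	 * bits; an assumption based on the register name).
	 */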
	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.ring_enc[i];
			ring->ready = true;
			r = amdgpu_ring_test_ring(ring);
			if (r) {
				ring->ready = false;
				goto done;
			}
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU))
		r = amdgpu_uvd_suspend(adev);

	return r;
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_uvd_resume(adev);
		if (r)
			return r;
	}

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));
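
	/* The VCPU sees three cache windows carved out of the UVD BO:
	 * window 0 holds the firmware image, window 1 the heap, and
	 * window 2 the stack plus per-session state. The offsets are
	 * programmed in 8-byte units, hence the >> 3 below.
	 */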
	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}
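
/* Coarse clock-gate control used on Carrizo-class parts (hence the cz_
 * prefix): when enable is set, the per-sub-block gate bits in
 * UVD_CGC_GATE and UVD_SUVD_CGC_GATE are switched on, otherwise they are
 * all cleared.
 */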
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
					     bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);
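
	/* Give the VCPU up to ten attempts to come up: each attempt polls
	 * UVD_STATUS for up to a second (100 x 10ms) waiting for the report
	 * bit (bit 1) to be set, and bounces the VCPU through a soft reset
	 * between attempts.
	 */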
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
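
	/* Configure the ring with RB_NO_FETCH set so the RBC stays idle
	 * while the base address and pointers are programmed; fetching is
	 * only re-enabled once everything below is in place.
	 */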
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* flags for this fence
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);
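
	/* A second GPCOM group with zeroed operands follows: command 0
	 * above writes the fence value latched in mmUVD_CONTEXT_ID, and
	 * command 2 raises the trap interrupt that completes it (command
	 * encodings assumed from the radeon/amdgpu UVD convention).
	 */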
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* flags for this fence
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;
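
	/* Seed UVD_CONTEXT_ID with a poison value, then ask the ring to
	 * write 0xDEADBEEF into it; reading the new value back proves the
	 * engine is fetching and executing ring packets.
	 */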
	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vm_id);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	uint32_t reg;

	if (vm_id < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, pd_addr >> 12);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 1 << vm_id);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
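
	/* Finally wait for the invalidate to land: GP_SCRATCH8 holds the
	 * compare mask, and command 0xC appears to stall until the masked
	 * register value matches DATA1 (zero here, i.e. the request bit
	 * for this VMID has cleared). This reading of commands 0x8 (write
	 * register) and 0xC (wait on register) is an assumption based on
	 * the operand layout.
	 */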
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;
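
	/* Wait-for-fence sequence: DATA0/DATA1 carry the fence address,
	 * GP_SCRATCH8 the compare mask, GP_SCRATCH9 the target sequence
	 * number; command 0xE appears to poll that location until the
	 * masked value reaches seq (assumed from the operand layout).
	 */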
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
		unsigned int vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vm_id);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
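/* 0xfd is every UVD_STATUS bit except bit 1, the VCPU "running" report
 * that uvd_v6_0_start() waits for, so anything else set counts as busy.
 */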
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;

	DRM_DEBUG("IH: UVD TRAP\n");
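
	/* The src_id values below mirror the amdgpu_irq_add_id() calls in
	 * uvd_v6_0_sw_init(): 124 is the decode trap, 119 and 120 the two
	 * encode traps. An ENC trap on a chip without ENC support is left
	 * unhandled and reported below.
	 */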
	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.ring);
		break;
	case 119:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK |
		UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK |
			UVD_CGC_GATE__JPEG2_MASK);
		/* only when PG is enabled can the clock to the VCPU be gated */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK |
		UVD_CGC_CTRL__JPEG_MODE_MASK |
		UVD_CGC_CTRL__SCPU_MODE_MASK |
		UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
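
/* Medium-grain clock gating: gate the UVD memory clients through the
 * indirect context register ixUVD_CGC_MEM_CTRL (accessed with the
 * RREG32_UVD_CTX/WREG32_UVD_CTX pair) and flip the dynamic clock mode
 * bit in mmUVD_CGC_CTRL to match.
 */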
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
/*		uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable Sw gating */
		uvd_v6_0_enable_clock_gating(adev, false);

		uvd_v6_0_set_sw_clock_gating(adev);
	}

	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE)
		uvd_v6_0_stop(adev);
	else
		ret = uvd_v6_0_start(adev);

	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		20 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.irq.num_types = 1;

	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};