/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET 0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c

#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2
static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
};
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
				enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
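
/* IH client IDs through which each VCN instance delivers its interrupts */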
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};
/**
 * vcn_v2_5_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = 2;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		u32 harvest;
		int i;

		adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		adev->vcn.num_enc_rings = 2;
	}

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);
	vcn_v2_5_set_ras_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}
/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
	uint32_t *ptr;
	struct amdgpu_device *adev = ip_block->adev;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
			VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << j))
			continue;
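		/* register offsets used by the shared VCN ring helpers:
		 * internal (firmware view) and external (per-instance MMIO
		 * view) */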
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);

		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;

		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);

		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0))
			ring->vm_hub = AMDGPU_MMHUB1(0);
		else
			ring->vm_hub = AMDGPU_MMHUB0(0);

		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
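
		/* the encode rings sit right after each instance's decode
		 * ring in the doorbell aperture */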
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;

			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
			    IP_VERSION(2, 5, 0))
				ring->vm_hub = AMDGPU_MMHUB1(0);
			else
				ring->vm_hub = AMDGPU_MMHUB0(0);

			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->vcn.inst[j].irq, 0,
					     hw_prio, NULL);
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[j]);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;

	r = amdgpu_vcn_ras_sw_init(adev);
	if (r)
		return r;

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}

	return 0;
}
/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, r, idx;
	struct amdgpu_device *adev = ip_block->adev;
	volatile struct amdgpu_fw_shared *fw_shared;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
		}
		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	kfree(adev->vcn.ip_dump);

	return r;
}
/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r = 0;

	if (amdgpu_sriov_vf(adev))
		r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (amdgpu_sriov_vf(adev)) {
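			/* the MMSCH exposes only the decode ring and the
			 * first encode ring to the VF; leave the others
			 * disabled */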
			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
			adev->vcn.inst[j].ring_dec.sched.ready = true;
		} else {

			ring = &adev->vcn.inst[j].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						ring->doorbell_index, j);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
				ring = &adev->vcn.inst[j].ring_enc[i];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					return r;
			}
		}
	}

	return r;
}
/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark the rings as no longer ready
 */
static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
			vcn_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);

		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
			amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
	}

	return 0;
}
/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = vcn_v2_5_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(ip_block->adev);

	return r;
}
/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_vcn_resume(ip_block->adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(ip_block);

	return r;
}
/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size;
	uint32_t offset;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
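		/* the VCPU sees firmware, stack and context through three
		 * cached windows, plus one non-cached window for the shared
		 * firmware area */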
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

		/* non-cache window */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
	}
}
static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}
static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}
static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
				bool indirect)
{
	uint32_t tmp;

	if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0))
		return;

	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmVCN_RAS_CNTL),
			      tmp, 0, indirect);

	tmp = UVD_VCPU_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_VCPU_INT_EN),
			      tmp, 0, indirect);

	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_SYS_INT_EN),
			      tmp, 0, indirect);
}
static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
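
	/* with indirect programming, the register writes below are staged
	 * into the DPG scratch SRAM and committed via PSP afterwards */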
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

	vcn_v2_6_enable_ras(adev, inst_idx, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}
static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_vcn(adev, true, i);
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return 0;

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
	}

	return 0;
}
static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop = 0, size = 0;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_1_init_header *header = NULL;

	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
	size = header->total_size;

	/*
	 * 1, write to mmMMSCH_VF_CTX_ADDR_LO/HI register with GPU mc addr of
	 * memory descriptor location
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 10;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(100);
		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev,
			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
	}

	return 0;
}
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp, i, rb_bufsz;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;
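
	/* the init table handed to the MMSCH is a flat stream of direct-write
	 * and read-modify-write packets, one group per engine, terminated by
	 * an end packet */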
	header->version = MMSCH_VERSION;
	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
	init_table += header->total_size;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		header->eng[i].table_offset = header->total_size;
		header->eng[i].init_status = 0;
		header->eng[i].table_size = 0;

		table_size = 0;

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
			size);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->wptr = 0;

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
			upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
			ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;

		/* refine header */
		header->eng[i].table_size = table_size;
		header->total_size += table_size;
	}

	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}
static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}
static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_vcn(adev, false, i);
	}

	return 0;
}
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code = 0;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;

				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}
/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}
/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
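
/* the packet emission callbacks are shared with VCN 2.0; only the
 * per-instance pointer accessors are 2.5-specific */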
static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.secure_submission_supported = true,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}
/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}
/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}
static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

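/**
 * vcn_v2_5_set_dec_ring_funcs - set dec ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Attach the VCN 2.5 decode ring callbacks to every non-harvested
 * instance and record the instance index in ring->me.
 */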
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
	}
}

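/**
 * vcn_v2_5_set_enc_ring_funcs - set enc ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Attach the VCN 2.5 encode ring callbacks to each encode ring of
 * every non-harvested instance.
 */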
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
	}
}

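/**
 * vcn_v2_5_is_idle - check VCN block idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true only when every non-harvested instance reports
 * UVD_STATUS__IDLE.
 */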
static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

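/**
 * vcn_v2_5_wait_for_idle - wait for VCN block idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance
 *
 * Poll each non-harvested instance until it reports idle, returning
 * an error for the first instance that times out.
 */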
static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

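/**
 * vcn_v2_5_set_clockgating_state - enable/disable VCN clock gating
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance
 * @state: requested clockgating state
 *
 * Gating is only applied while the block is idle; under SR-IOV the
 * request is a no-op since the host controls gating.
 */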
static int vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		if (!vcn_v2_5_is_idle(adev))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

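/**
 * vcn_v2_5_set_powergating_state - gate/ungate VCN power
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance
 * @state: requested powergating state
 *
 * Stops the block when gating and starts it when ungating, caching
 * the new state in adev->vcn.cur_state on success.
 */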
static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

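/* VCN interrupt state changes are accepted without any register programming. */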
static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

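/* RAS poison interrupts likewise need no per-state programming. */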
static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

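/**
 * vcn_v2_5_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this IV came from
 * @entry: interrupt vector entry
 *
 * Map the client id to a VCN instance, then hand fence processing
 * to the decode or encode ring that raised the interrupt.
 */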
static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
	.set = vcn_v2_6_set_ras_interrupt_state,
	.process = amdgpu_vcn_process_poison_irq,
};

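/**
 * vcn_v2_5_set_irq_funcs - set VCN interrupt functions
 *
 * @adev: amdgpu_device pointer
 *
 * Each non-harvested instance exposes one interrupt type per encode
 * ring plus one for decode, along with a separate RAS poison source.
 */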
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;

		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
	}
}

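/*
 * Pretty-print the register snapshot captured by vcn_v2_5_dump_ip_state,
 * one block per instance; harvested and powered-down instances are
 * reported but not dumped.
 */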
static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
	uint32_t inst_off, is_powered;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * reg_count;
		is_powered = (adev->vcn.ip_dump[inst_off] &
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
					   adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}

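/*
 * Snapshot the vcn_reg_list_2_5 registers for each instance.
 * mmUVD_POWER_STATUS is read first; the remaining registers are only
 * read when the instance is actually powered up.
 */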
static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	uint32_t inst_off;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * reg_count;
		/* mmUVD_POWER_STATUS is always readable and is first element of the array */
		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
		is_powered = (adev->vcn.ip_dump[inst_off] &
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered)
			for (j = 1; j < reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i));
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
	.dump_ip_state = vcn_v2_5_dump_ip_state,
	.print_ip_state = vcn_v2_5_print_ip_state,
};

static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
	.name = "vcn_v2_6",
	.early_init = vcn_v2_5_early_init,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
	.dump_ip_state = vcn_v2_5_dump_ip_state,
	.print_ip_state = vcn_v2_5_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_VCN,
		.major = 2,
		.minor = 5,
		.rev = 0,
		.funcs = &vcn_v2_5_ip_funcs,
};

const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_VCN,
		.major = 2,
		.minor = 6,
		.rev = 0,
		.funcs = &vcn_v2_6_ip_funcs,
};

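/*
 * Read back the RAS status register for one instance/sub-block and
 * report whether a poisoned page fault was latched.
 */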
static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
			uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_VCN_V2_6_VCPU_VCODEC:
		reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
			instance, sub_block);

	return poison_stat;
}

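/*
 * Accumulate the poison status across all instances and sub-blocks;
 * any non-zero hit marks the whole block as poisoned.
 */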
static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst, sub;
	uint32_t poison_stat = 0;

	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
		for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				vcn_v2_6_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
	.query_poison_status = vcn_v2_6_query_poison_status,
};

static struct amdgpu_vcn_ras vcn_v2_6_ras = {
	.ras_block = {
		.hw_ops = &vcn_v2_6_ras_hw_ops,
		.ras_late_init = amdgpu_vcn_ras_late_init,
	},
};

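/*
 * Only VCN 2.6 wires up RAS handling; the other 2.5.x variants leave
 * adev->vcn.ras unset.
 */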
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(2, 6, 0):
		adev->vcn.ras = &vcn_v2_6_ras;