/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"
#include "vcn_v2_5.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET 0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c

#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2
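
/*
 * Arcturus carries two VCN hardware instances; VCN 2.6 (Aldebaran) reuses
 * these code paths and adds RAS poison handling on top.
 */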
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
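
/* IH client IDs for the two VCN instances, indexed by instance number. */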
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};
/**
 * vcn_v2_5_early_init - set function pointers and load microcode
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = 2;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		u32 harvest;
		int i;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
					AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		adev->vcn.num_enc_rings = 2;
	}

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);
	vcn_v2_5_set_ras_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
			VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;
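
	/*
	 * Record both views of the decode registers for each enabled
	 * instance: the 'internal' indices are what the firmware expects in
	 * command packets, while the 'external' SOC15 offsets are used for
	 * direct MMIO access by the shared vcn_v2_0 ring helpers.
	 */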
	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);

		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;
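
		/*
		 * Doorbell layout (in 32-bit doorbell slots, relative to
		 * vcn_ring0_1 << 1): SR-IOV packs each instance into two
		 * slots (dec + enc0), while bare metal reserves a stride of
		 * eight per instance, e.g. instance 1's decode ring lands on
		 * slot 8 and its encode rings on slots 10 and 11.
		 */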
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);

		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0))
			ring->vm_hub = AMDGPU_MMHUB1(0);
		else
			ring->vm_hub = AMDGPU_MMHUB0(0);

		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;

			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
			    IP_VERSION(2, 5, 0))
				ring->vm_hub = AMDGPU_MMHUB1(0);
			else
				ring->vm_hub = AMDGPU_MMHUB0(0);

			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->vcn.inst[j].irq, 0,
					     hw_prio, NULL);
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[j]);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;

	r = amdgpu_vcn_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int i, r, idx;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r = 0;

	if (amdgpu_sriov_vf(adev))
		r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (amdgpu_sriov_vf(adev)) {
			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
			adev->vcn.inst[j].ring_dec.sched.ready = true;
		} else {
			ring = &adev->vcn.inst[j].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						ring->doorbell_index, j);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
				ring = &adev->vcn.inst[j].ring_enc[i];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					return r;
			}
		}
	}

	DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
		(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return 0;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
			amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;
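
	/*
	 * Each instance gets three cached VCPU windows, laid out back to
	 * back in the instance's VCPU buffer object: window 0 holds the
	 * firmware image ('offset' stays 0 when PSP already placed the
	 * ucode in the TMR), window 1 the stack, and window 2 the context.
	 * A fourth, non-cached window exposes the shared amdgpu_fw_shared
	 * structure.
	 */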
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

		/* non-cache window */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
	}
}

static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
				bool indirect)
{
	uint32_t tmp;

	if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0))
		return;

	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmVCN_RAS_CNTL),
			      tmp, 0, indirect);

	tmp = UVD_VCPU_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_VCPU_INT_EN),
			      tmp, 0, indirect);

	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_SYS_INT_EN),
			      tmp, 0, indirect);
}

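/*
 * DPG start: with 'indirect' set, every WREG32_SOC15_DPG_MODE() below is
 * staged into the instance's DPG scratch SRAM instead of hitting registers
 * directly, and the whole sequence is committed in one shot via
 * amdgpu_vcn_psp_update_sram() near the end of the function.
 */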
static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

	vcn_v2_6_enable_ras(adev, inst_idx, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

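/*
 * Note: ring-pointer writes in both the DPG and SPG start paths are
 * bracketed by setting and clearing FW_QUEUE_RING_RESET in
 * fw_shared->multi_queue, so the firmware can treat the pointer writes as
 * a queue re-initialization rather than live updates.
 */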
static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return 0;

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		tmp &= 0xFF;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU block reset */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
	}

	return 0;
}

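/*
 * Under SR-IOV the host-side MMSCH (multimedia scheduler) programs the VCN
 * registers on the VF's behalf: vcn_v2_5_sriov_start() only builds a
 * descriptor table of register writes, and vcn_v2_5_mmsch_start() hands
 * that table to MMSCH through the mailbox registers.
 */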
static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop = 0, size = 0;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_1_init_header *header = NULL;

	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
	size = header->total_size;

	/*
	 * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
	 * memory descriptor location
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 10; /* retry budget; the exact count was elided in this excerpt */
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10); /* poll interval, likewise an assumption */
		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev,
			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
	}

	return 0;
}

static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp, i, rb_bufsz;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	header->version = MMSCH_VERSION;
	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
	init_table += header->total_size;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		header->eng[i].table_offset = header->total_size;
		header->eng[i].init_status = 0;
		header->eng[i].table_size = 0;

		table_size = 0;

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
			size);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->wptr = 0;

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
			upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
			ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;

		/* refine header */
		header->eng[i].table_size = table_size;
		header->total_size += table_size;
	}

	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}

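/*
 * DPG stop: wait for each ring to drain (read pointer catching up to the
 * write pointer) and for the power-status handshake before turning the
 * dynamic power-gating mode bit back off.
 */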
static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

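/*
 * DPG pause: a firmware-based pause is requested while encode work is
 * outstanding (see amdgpu_vcn_ring_begin_use()); once the pause is ACKed,
 * the encode ring pointers are re-programmed under the DPG stall.
 */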
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code = 0;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;

				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

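/*
 * The v2.5 rings reuse the vcn_v2_0 packet emitters wholesale; only the
 * rptr/wptr accessors differ, since they must address the per-instance
 * register banks via ring->me.
 */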
static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.secure_submission_supported = true,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *source,
					    unsigned int type,
					    enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
	.set = vcn_v2_6_set_ras_interrupt_state,
	.process = amdgpu_vcn_process_poison_irq,
};

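/*
 * Each instance registers num_enc_rings + 1 interrupt types: one per
 * encode ring plus the decode/system-message source.
 */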
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;

		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
	.name = "vcn_v2_6",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};

const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 6,
	.rev = 0,
	.funcs = &vcn_v2_6_ip_funcs,
};

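/*
 * VCN 2.6 RAS support: poison status is read back per instance and per
 * sub-block; only the VCPU/VCODEC sub-block is decoded here.
 */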
static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
			uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_VCN_V2_6_VCPU_VCODEC:
		reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
			instance, sub_block);

	return poison_stat;
}

static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst, sub;
	uint32_t poison_stat = 0;

	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
		for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				vcn_v2_6_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
	.query_poison_status = vcn_v2_6_query_poison_status,
};

static struct amdgpu_vcn_ras vcn_v2_6_ras = {
	.ras_block = {
		.hw_ops = &vcn_v2_6_ras_hw_ops,
		.ras_late_init = amdgpu_vcn_ras_late_init,
	},
};

static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(2, 6, 0):
		adev->vcn.ras = &vcn_v2_6_ras;
		break;
	default:
		break;
	}
}