/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"
#include "vcn_v2_5.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0				0x48200

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN25_MAX_HW_INSTANCES_ARCTURUS				2

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
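		/*
		 * Under SR-IOV the host's MMSCH firmware brings the engines up
		 * (see vcn_v2_5_sriov_start()), so the topology is fixed here:
		 * two instances, no harvesting, one encode ring per instance.
		 */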
		adev->vcn.num_vcn_inst = 2;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		u32 harvest;
		int i;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
					AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		adev->vcn.num_enc_rings = 2;
	}

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);
	vcn_v2_5_set_ras_funcs(adev);

	return 0;
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
			VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);

		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;
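
		/*
		 * The vcn_ring0_1 index counts 64-bit doorbell slots, hence
		 * the shift into 32-bit slots; each SR-IOV instance then takes
		 * a 2-doorbell stride (dec + enc0), bare metal an 8-doorbell
		 * stride per instance.
		 */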
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;

			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->vcn.inst[j].irq, 0,
					     hw_prio, NULL);
			if (r)
				return r;
		}
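
		/*
		 * fw_shared is a CPU/GPU-visible buffer shared with the VCN
		 * firmware; the multi-queue flag advertises per-queue support,
		 * enabling the FW_QUEUE_RING_RESET handshake used around the
		 * WPTR/RPTR reprogramming in the start/pause paths below.
		 */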
		fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[j]);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int i, r, idx;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared;
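
	/*
	 * Only touch the firmware-shared buffer while the DRM device is still
	 * present; drm_dev_enter() fails once the device has been unplugged.
	 */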
	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
		}
		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r = 0;

	if (amdgpu_sriov_vf(adev))
		r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (amdgpu_sriov_vf(adev)) {
			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
			adev->vcn.inst[j].ring_dec.sched.ready = true;
		} else {

			ring = &adev->vcn.inst[j].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, j);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
				ring = &adev->vcn.inst[j].ring_enc[i];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

		/* non-cache window */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
	}
}

static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}
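
/*
 * DPG (dynamic power gating) mode runs the instance with most of the block
 * power gated; register programming below goes through the UVD_DPG indirect
 * interface and can optionally be staged in SRAM and committed via the PSP.
 */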
static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
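
	/*
	 * With indirect programming the register writes below are staged in
	 * the instance's DPG scratch SRAM buffer and handed to the PSP in one
	 * batch (see the psp_update_vcn_sram() call at the end of this
	 * function).
	 */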
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return 0;

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);
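
		/*
		 * The VCPU is now out of reset; poll UVD_STATUS for the
		 * boot-complete bit (0x2), retrying with a block reset up to
		 * ten times before giving up.
		 */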
		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
	}

	return 0;
}
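
/*
 * MMSCH (multi-media scheduler) handshake used under SR-IOV: the guest
 * builds an init-descriptor table in GPU memory and the MMSCH firmware
 * programs the VCN engines on the guest's behalf.
 */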
static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop = 0, size = 0;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_1_init_header *header = NULL;

	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
	size = header->total_size;

	/*
	 * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
	 * memory descriptor location
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 10;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(100);
		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev,
			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
	}

	return 0;
}

static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp, i, rb_bufsz;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	header->version = MMSCH_VERSION;
	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
	init_table += header->total_size;
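
	/*
	 * One init table per engine follows the header; each table is a
	 * packet stream of direct-write / read-modify-write commands that
	 * MMSCH replays against the instance's registers.
	 */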
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		header->eng[i].table_offset = header->total_size;
		header->eng[i].init_status = 0;
		header->eng[i].table_size = 0;

		table_size = 0;

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
			size);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->wptr = 0;

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
			upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
			ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;

		/* refine header */
		header->eng[i].table_size = table_size;
		header->total_size += table_size;
	}

	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}

static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code = 0;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;

				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					   UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					   ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					   0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.secure_submission_supported = true,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.secure_submission_supported = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v2_6_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
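
/*
 * Two ring-funcs tables exist per ring type because VCN 2.5 (Arcturus) sits
 * behind MMHUB_1 while VCN 2.6 (Aldebaran) sits behind MMHUB_0; the tables
 * are otherwise identical.
 */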
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		else /* CHIP_ALDEBARAN */
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			else /* CHIP_ALDEBARAN */
				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	case VCN_2_6__SRCID_UVD_POISON:
		amdgpu_vcn_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
	.name = "vcn_v2_6",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};

const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 6,
	.rev = 0,
	.funcs = &vcn_v2_6_ip_funcs,
};

static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
			uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_VCN_V2_6_VCPU_VCODEC:
		reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
			instance, sub_block);

	return poison_stat;
}

static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst, sub;
	uint32_t poison_stat = 0;

	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
		for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
			poison_stat +=
			vcn_v2_6_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
	.query_poison_status = vcn_v2_6_query_poison_status,
};

static struct amdgpu_vcn_ras vcn_v2_6_ras = {
	.ras_block = {
		.hw_ops = &vcn_v2_6_ras_hw_ops,
	},
};

static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[VCN_HWIP][0]) {
	case IP_VERSION(2, 6, 0):
		adev->vcn.ras = &vcn_v2_6_ras;
		break;
	default:
		break;
	}

	if (adev->vcn.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);

		strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
		adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
		adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
		adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;

		/* if no special ras_late_init function is defined, use the default */
		if (!adev->vcn.ras->ras_block.ras_late_init)
			adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
	}
}