/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "mmsch_v4_0.h"
#include "vcn_v4_0.h"

#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#include <drm/drm_drv.h>
#define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX

#define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0	0x48300

#define VCN_HARVEST_MMSCH		0

#define RDECODE_MSG_CREATE		0x00000000
#define RDECODE_MESSAGE_CREATE		0x00000001
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};
static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_set_powergating_state(void *handle,
		enum amd_powergating_state state);
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
/**
 * vcn_v4_0_early_init - set function pointers and load microcode
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v4_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
				adev->vcn.harvest_config |= 1 << i;
				dev_info(adev->dev, "VCN%d is disabled by hypervisor\n", i);
			}
		}
	}

	/* re-use enc ring as unified ring */
	adev->vcn.num_enc_rings = 1;

	vcn_v4_0_set_unified_ring_funcs(adev);
	vcn_v4_0_set_irq_funcs(adev);
	vcn_v4_0_set_ras_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}
static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;

	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
	fw_shared->sq.is_enabled = 1;

	fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
	fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
		AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;

	if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
	    IP_VERSION(4, 0, 2)) {
		fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
		fw_shared->drm_key_wa.method =
			AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
	}

	if (amdgpu_vcnfw_log)
		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);

	return 0;
}
/**
 * vcn_v4_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		/* Init instance 0 sched_score to 1, so it's scheduled after other instances */
		if (i == 0)
			atomic_set(&adev->vcn.inst[i].sched_score, 1);
		else
			atomic_set(&adev->vcn.inst[i].sched_score, 0);

		/* VCN UNIFIED TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
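		/* doorbell layout differs: under SR-IOV each instance gets a
		 * compact slot per ring, while on bare metal instances are
		 * spaced 8 doorbells apart
		 */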
		if (amdgpu_sriov_vf(adev))
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1;
		else
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_unified_%d", i);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		vcn_v4_0_fw_shared_init(adev, i);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;

	r = amdgpu_vcn_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}
/**
 * vcn_v4_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}
/**
 * vcn_v4_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v4_0_start_sriov(adev);
		if (r)
			goto done;
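		/* under SR-IOV the MMSCH programs the ring registers on the
		 * host's behalf, so only the wptr copies are restored and the
		 * rings marked ready here
		 */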
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_enc[0];
			ring->wptr = 0;
			ring->wptr_old = 0;
			vcn_v4_0_unified_ring_set_wptr(ring);
			ring->sched.ready = true;
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_enc[0];

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}
/**
 * vcn_v4_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, regUVD_STATUS))) {
				vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
			}
		}
		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
			amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
	}

	return 0;
}
/**
 * vcn_v4_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v4_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}
/**
 * vcn_v4_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v4_0_hw_init(adev);

	return r;
}
/**
 * vcn_v4_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}
/**
 * vcn_v4_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;
	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
 * vcn_v4_0_disable_static_power_gating - disable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable static power gating for VCN block
 */
static void vcn_v4_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		uint32_t value;

		value = (inst) ? 0x2200800 : 0;
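		/* the expected UVD_PGFSM_STATUS readback differs per instance;
		 * 0x2200800 is kept as-is from the original code and is
		 * presumably hardware-specific
		 */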
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS, value, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
}
/**
 * vcn_v4_0_enable_static_power_gating - enable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable static power gating for VCN block
 */
static void vcn_v4_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDS_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}
}
/**
 * vcn_v4_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, regUVD_CGC_GATE, data);
	SOC15_WAIT_ON_RREG(VCN, inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}
/**
 * vcn_v4_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
		int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
 * vcn_v4_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;
	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}
static void vcn_v4_0_enable_ras(struct amdgpu_device *adev, int inst_idx,
				bool indirect)
{
	uint32_t tmp;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
		return;

	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regVCN_RAS_CNTL),
			      tmp, 0, indirect);

	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_SYS_INT_EN),
			      tmp, 0, indirect);
}
/**
 * vcn_v4_0_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);
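	/* in indirect mode the register writes below are not issued via MMIO
	 * but staged into the DPG scratch SRAM and committed in one shot via
	 * amdgpu_vcn_psp_update_sram() further down
	 */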
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* disable clock gating */
	vcn_v4_0_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
	/* enable VCPU clock */
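	/* the VCPU is held in block reset (BLK_RST) while its clock is brought
	 * up; reset is released further down, after vcn_v4_0_mc_resume_dpg_mode(),
	 * by rewriting regUVD_VCPU_CNTL without BLK_RST
	 */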
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);
	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v4_0_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);

	vcn_v4_0_enable_ras(adev, inst_idx, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
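	/* signal the firmware that the ring is being reprogrammed; the flag is
	 * cleared again below once the ring has been re-enabled
	 */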
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
			VCN_RB1_DB_CTRL__EN_MASK);

	return 0;
}
/**
 * vcn_v4_0_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v4_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v4_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable VCN power gating */
		vcn_v4_0_disable_static_power_gating(adev, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v4_0_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
				~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
				UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
				UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
				UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
				UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup regUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
				((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
				 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
				 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
				 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
				((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
				 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
				 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
				 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup UVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
				((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
				 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
				 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v4_0_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
				~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, regUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
				if (amdgpu_emu_mode == 1)
					msleep(1);
			}

			if (amdgpu_emu_mode == 1) {
				r = -1;
				if (status & 2) {
					r = 0;
					break;
				}
			} else {
				r = 0;
				if (status & 2)
					break;

				dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
						UVD_VCPU_CNTL__BLK_RST_MASK,
						~UVD_VCPU_CNTL__BLK_RST_MASK);
				mdelay(10);
				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
						~UVD_VCPU_CNTL__BLK_RST_MASK);

				mdelay(10);
				r = -1;
			}
		}

		if (r) {
			dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
				UVD_MASTINT_EN__VCPU_EN_MASK,
				~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
				ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
				VCN_RB1_DB_CTRL__EN_MASK);

		WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);

		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
		fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
		WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

		tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
		ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);

		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
		fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
	}

	return 0;
}
static int vcn_v4_0_init_ring_metadata(struct amdgpu_device *adev, uint32_t vcn_inst, struct amdgpu_ring *ring_enc)
{
	struct amdgpu_vcn_rb_metadata *rb_metadata = NULL;
	uint8_t *rb_ptr = (uint8_t *)ring_enc->ring;
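	/* the rb metadata lives in the extra_dw area allocated right past the
	 * end of the ring buffer (see .extra_dw in vcn_v4_0_unified_ring_vm_funcs)
	 */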
	rb_ptr += ring_enc->ring_size;
	rb_metadata = (struct amdgpu_vcn_rb_metadata *)rb_ptr;

	memset(rb_metadata, 0, sizeof(struct amdgpu_vcn_rb_metadata));
	rb_metadata->size = sizeof(struct amdgpu_vcn_rb_metadata);
	rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
	rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG);
	rb_metadata->version = 1;
	rb_metadata->ring_id = vcn_inst & 0xFF;

	return 0;
}
static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring_enc;
	uint64_t cache_addr;
	uint64_t rb_enc_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;
	uint32_t init_status;
	uint32_t enabled_vcn;

	struct mmsch_v4_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v4_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v4_0_cmd_end end = { {0} };
	struct mmsch_v4_0_init_header header;

	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	volatile struct amdgpu_fw_shared_rb_setup *rb_setup;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
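	/* MMSCH table sizes and offsets are expressed in dwords, hence the >> 2 */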
	header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2;
	for (i = 0; i < MMSCH_V4_0_VCN_INSTANCES; i++) {
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = 0;
		header.inst[i].table_size = 0;
	}

	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		// Must re/init fw_shared at beginning
		vcn_v4_0_fw_shared_init(adev, i);

		table_size = 0;

		MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_VCPU_CACHE_OFFSET0),
				0);
		} else {
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_SIZE0),
			cache_size);
		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		rb_setup = &fw_shared->rb_setup;

		ring_enc = &adev->vcn.inst[i].ring_enc[0];
		ring_enc->wptr = 0;
		rb_enc_addr = ring_enc->gpu_addr;

		rb_setup->is_rb_enabled_flags |= RB_ENABLED;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);

		if (amdgpu_sriov_is_vcn_rb_decouple(adev)) {
			vcn_v4_0_init_ring_metadata(adev, i, ring_enc);

			memset((void *)&rb_setup->rb_info, 0, sizeof(struct amdgpu_vcn_rb_setup_info) * MAX_NUM_VCN_RB_SETUP);
			if (!(adev->vcn.harvest_config & (1 << 0))) {
				rb_setup->rb_info[0].rb_addr_lo = lower_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr);
				rb_setup->rb_info[0].rb_addr_hi = upper_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr);
				rb_setup->rb_info[0].rb_size = adev->vcn.inst[0].ring_enc[0].ring_size / 4;
			}
			if (!(adev->vcn.harvest_config & (1 << 1))) {
				rb_setup->rb_info[2].rb_addr_lo = lower_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr);
				rb_setup->rb_info[2].rb_addr_hi = upper_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr);
				rb_setup->rb_info[2].rb_size = adev->vcn.inst[1].ring_enc[0].ring_size / 4;
			}
			fw_shared->decouple.is_enabled = 1;
			fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG);
		} else {
			rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
			rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
			rb_setup->rb_size = ring_enc->ring_size / 4;
		}
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_NONCACHE_SIZE0),
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));

		/* add end packet */
		MMSCH_V4_0_INSERT_END();

		/* refine header */
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = header.total_size;
		header.inst[i].table_size = table_size;
		header.total_size += table_size;
	}
	/* Update init table header in memory */
	size = sizeof(struct mmsch_v4_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy((void *)table_loc, &header, size);

	/* message MMSCH (in VCN[0]) to initialize this client
	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	ctx_addr = table->gpu_addr;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

	/* 2, update vmid of descriptor */
	tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID);
	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp);

	/* 3, notify mmsch about the size of this descriptor */
	size = header.total_size;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0);

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	param = 0x00000001;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param);
	tmp = 0;
	timeout = 1000;
	resp = 0;
	expected = MMSCH_VF_MAILBOX_RESP__OK;
	while (resp != expected) {
		resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);
		if (resp != 0)
			break;

		udelay(10);
		tmp = tmp + 10;
		if (tmp >= timeout) {
			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
				" waiting for regMMSCH_VF_MAILBOX_RESP "\
				"(expected=0x%08x, readback=0x%08x)\n",
				tmp, expected, resp);
			return -EBUSY;
		}
	}

	enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
	init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->inst[enabled_vcn].init_status;
	if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
	    && init_status != MMSCH_VF_ENGINE_STATUS__PASS)
		DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
			"status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);

	return 0;
}
/**
 * vcn_v4_0_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

	vcn_v4_0_pause_dpg_mode(adev, inst_idx, &state);
	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
}
/**
 * vcn_v4_0_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v4_0_stop(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			vcn_v4_0_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
				UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
				~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
				~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v4_0_enable_clock_gating(adev, i);

		/* enable VCN power gating */
		vcn_v4_0_enable_static_power_gating(adev, i);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}
/**
 * vcn_v4_0_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
		struct dpg_pause_state *new_state)
{
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}
/**
 * vcn_v4_0_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v4_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
}
/**
 * vcn_v4_0_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v4_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
}
/**
 * vcn_v4_0_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
				struct amdgpu_job *job)
{
	struct drm_gpu_scheduler **scheds;

	/* The create msg must be in the first IB submitted */
	if (atomic_read(&job->base.entity->fence_seq))
		return -EINVAL;

	/* if VCN0 is harvested, we can't support AV1 */
	if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
		return -EINVAL;

	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
		[AMDGPU_RING_PRIO_0].sched;
	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);

	return 0;
}
static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
			    uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *map;
	uint32_t *msg, num_buffers;
	struct amdgpu_bo *bo;
	uint64_t start, end;
	unsigned int i;
	void *ptr;
	int r;
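	/* the high bits of a GMC-hole address are sign-extended, so mask them
	 * off before looking up the mapping
	 */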
	addr &= AMDGPU_GMC_HOLE_MASK;
	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
		return r;
	}

	start = map->start * AMDGPU_GPU_PAGE_SIZE;
	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
		return r;
	}

	msg = ptr + addr - start;

	if (msg[1] > end - addr) {
		r = -EINVAL;
		goto out;
	}

	if (msg[3] != RDECODE_MSG_CREATE)
		goto out;

	num_buffers = msg[2];
	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
		uint32_t offset, size, *create;

		if (msg[0] != RDECODE_MESSAGE_CREATE)
			continue;

		offset = msg[1];
		size = msg[2];

		if (offset + size > end) {
			r = -EINVAL;
			goto out;
		}

		create = ptr + addr + offset - start;

		/* H264, HEVC and VP9 can run on any instance */
		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
			continue;

		r = vcn_v4_0_limit_sched(p, job);
		if (r)
			goto out;
	}

out:
	amdgpu_bo_kunmap(bo);
	return r;
}
#define RADEON_VCN_ENGINE_TYPE_ENCODE			(0x00000002)
#define RADEON_VCN_ENGINE_TYPE_DECODE			(0x00000003)

#define RADEON_VCN_ENGINE_INFO				(0x30000001)
#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET		16

#define RENCODE_ENCODE_STANDARD_AV1			2
#define RENCODE_IB_PARAM_SESSION_INIT			0x00000003
#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET	64

/* return the offset in ib if id is found, -1 otherwise
 * to speed up the search we only search up to max_offset
 */
static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
{
    int i;

    for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
        if (ib->ptr[i + 1] == id)
            return i;
    }

    return -1;
}
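
/*
 * Inspect a unified-queue IB at submission time. Decode jobs have their
 * create message checked via vcn_v4_0_dec_msg(); encode jobs are scanned for
 * an AV1 session-init package. In both cases AV1 work ends up confined to
 * VCN instance 0 by vcn_v4_0_limit_sched().
 */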
static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
                       struct amdgpu_job *job,
                       struct amdgpu_ib *ib)
{
    struct amdgpu_ring *ring = amdgpu_job_ring(job);
    struct amdgpu_vcn_decode_buffer *decode_buffer;
    uint64_t addr;
    uint32_t val;
    int idx;

    /* The first instance can decode anything */
    if (!ring->me)
        return 0;

    /* RADEON_VCN_ENGINE_INFO is at the top of ib block */
    idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
            RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
    if (idx < 0) /* engine info is missing */
        return 0;

    val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
    if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
        decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];

        if (!(decode_buffer->valid_buf_flag & 0x1))
            return 0;

        addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
            decode_buffer->msg_buffer_address_lo;
        return vcn_v4_0_dec_msg(p, job, addr);
    } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
        idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
                RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
        if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
            return vcn_v4_0_limit_sched(p, job);
    }

    return 0;
}
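
/*
 * Function table for the unified encode/decode ring. The emit helpers are
 * shared with VCN 2.0, whose encode-ring packet format still applies here.
 * Note the table is deliberately not const: secure_submission_supported is
 * patched at init time for VCN 4.0.2, see vcn_v4_0_set_unified_ring_funcs().
 */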
static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
    .type = AMDGPU_RING_TYPE_VCN_ENC,
    .align_mask = 0x3f,
    .nop = VCN_ENC_CMD_NO_OP,
    .extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
    .get_rptr = vcn_v4_0_unified_ring_get_rptr,
    .get_wptr = vcn_v4_0_unified_ring_get_wptr,
    .set_wptr = vcn_v4_0_unified_ring_set_wptr,
    .patch_cs_in_place = vcn_v4_0_ring_patch_cs_in_place,
    .emit_frame_size =
        SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
        SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
        4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
        5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
        1, /* vcn_v2_0_enc_ring_insert_end */
    .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
    .emit_ib = vcn_v2_0_enc_ring_emit_ib,
    .emit_fence = vcn_v2_0_enc_ring_emit_fence,
    .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
    .test_ring = amdgpu_vcn_enc_ring_test_ring,
    .test_ib = amdgpu_vcn_unified_ring_test_ib,
    .insert_nop = amdgpu_ring_insert_nop,
    .insert_end = vcn_v2_0_enc_ring_insert_end,
    .pad_ib = amdgpu_ring_generic_pad_ib,
    .begin_use = amdgpu_vcn_ring_begin_use,
    .end_use = amdgpu_vcn_ring_end_use,
    .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
    .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
    .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v4_0_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev)
{
    int i;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        if (adev->vcn.harvest_config & (1 << i))
            continue;

        if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 2))
            vcn_v4_0_unified_ring_vm_funcs.secure_submission_supported = true;

        adev->vcn.inst[i].ring_enc[0].funcs =
            (const struct amdgpu_ring_funcs *)&vcn_v4_0_unified_ring_vm_funcs;
        adev->vcn.inst[i].ring_enc[0].me = i;

        DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i);
    }
}

/**
 * vcn_v4_0_is_idle - check whether the VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether the VCN block is idle
 */
static bool vcn_v4_0_is_idle(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int i, ret = 1;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        if (adev->vcn.harvest_config & (1 << i))
            continue;

        /* every non-harvested instance must report idle */
        ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
    }

    return ret;
}

/**
 * vcn_v4_0_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Wait for the VCN block to become idle
 */
static int vcn_v4_0_wait_for_idle(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int i, ret = 0;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        if (adev->vcn.harvest_config & (1 << i))
            continue;

        ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
                UVD_STATUS__IDLE);
        if (ret)
            return ret;
    }

    return ret;
}

/**
 * vcn_v4_0_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    bool enable = state == AMD_CG_STATE_GATE;
    int i;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        if (adev->vcn.harvest_config & (1 << i))
            continue;

        if (enable) {
            /* gating is only allowed once the instance is idle */
            if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
                return -EBUSY;
            vcn_v4_0_enable_clock_gating(adev, i);
        } else {
            vcn_v4_0_disable_clock_gating(adev, i);
        }
    }

    return 0;
}

/**
 * vcn_v4_0_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int ret = 0;

    /* for SR-IOV, the guest should not control VCN power-gating;
     * MMSCH firmware owns both power-gating and clock-gating, so the
     * guest must avoid touching the CGC and PG registers
     */
    if (amdgpu_sriov_vf(adev)) {
        adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
        return 0;
    }

    if (state == adev->vcn.cur_state)
        return 0;

    if (state == AMD_PG_STATE_GATE)
        ret = vcn_v4_0_stop(adev);
    else
        ret = vcn_v4_0_start(adev);

    if (!ret)
        adev->vcn.cur_state = state;

    return ret;
}

/**
 * vcn_v4_0_set_interrupt_state - set VCN block interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block interrupt state
 */
static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
                    unsigned type, enum amdgpu_interrupt_state state)
{
    /* intentionally a no-op: VCN interrupt state needs no programming here */
    return 0;
}

/**
 * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block RAS interrupt state
 */
static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
                    struct amdgpu_irq_src *source,
                    unsigned int type,
                    enum amdgpu_interrupt_state state)
{
    return 0;
}

/**
 * vcn_v4_0_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
                      struct amdgpu_iv_entry *entry)
{
    uint32_t ip_instance;

    if (amdgpu_sriov_is_vcn_rb_decouple(adev)) {
        /* with decoupled ring buffers the ring id carries the instance */
        ip_instance = entry->ring_id;
    } else {
        switch (entry->client_id) {
        case SOC15_IH_CLIENTID_VCN:
            ip_instance = 0;
            break;
        case SOC15_IH_CLIENTID_VCN1:
            ip_instance = 1;
            break;
        default:
            DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
            return 0;
        }
    }

    DRM_DEBUG("IH: VCN TRAP\n");

    switch (entry->src_id) {
    case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
        amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
        break;
    default:
        DRM_ERROR("Unhandled interrupt: %d %d\n",
              entry->src_id, entry->src_data[0]);
        break;
    }

    return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
    .set = vcn_v4_0_set_interrupt_state,
    .process = vcn_v4_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = {
    .set = vcn_v4_0_set_ras_interrupt_state,
    .process = amdgpu_vcn_process_poison_irq,
};

/**
 * vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
    int i;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        if (adev->vcn.harvest_config & (1 << i))
            continue;

        adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
        adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;

        adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
        adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
    }
}
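
/*
 * IP-block dispatch table: amdgpu drives the VCN 4.0 block through these
 * callbacks for init/teardown, suspend/resume, idle checks and the gating
 * entry points implemented above.
 */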
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
    .name = "vcn_v4_0",
    .early_init = vcn_v4_0_early_init,
    .late_init = NULL,
    .sw_init = vcn_v4_0_sw_init,
    .sw_fini = vcn_v4_0_sw_fini,
    .hw_init = vcn_v4_0_hw_init,
    .hw_fini = vcn_v4_0_hw_fini,
    .suspend = vcn_v4_0_suspend,
    .resume = vcn_v4_0_resume,
    .is_idle = vcn_v4_0_is_idle,
    .wait_for_idle = vcn_v4_0_wait_for_idle,
    .check_soft_reset = NULL,
    .pre_soft_reset = NULL,
    .soft_reset = NULL,
    .post_soft_reset = NULL,
    .set_clockgating_state = vcn_v4_0_set_clockgating_state,
    .set_powergating_state = vcn_v4_0_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
    .type = AMD_IP_BLOCK_TYPE_VCN,
    .major = 4,
    .minor = 0,
    .rev = 0,
    .funcs = &vcn_v4_0_ip_funcs,
};
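
/*
 * RAS poison query: read the per-instance RAS status registers and report
 * whether any sub-block has latched a poisoned page fault. VCN 4.0 only
 * exposes the VCPU/VCODEC sub-block here.
 */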
static uint32_t vcn_v4_0_query_poison_by_instance(struct amdgpu_device *adev,
            uint32_t instance, uint32_t sub_block)
{
    uint32_t poison_stat = 0, reg_value = 0;

    switch (sub_block) {
    case AMDGPU_VCN_V4_0_VCPU_VCODEC:
        reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
        poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
        break;
    default:
        break;
    }

    if (poison_stat)
        dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
            instance, sub_block);

    return poison_stat;
}

static bool vcn_v4_0_query_ras_poison_status(struct amdgpu_device *adev)
{
    uint32_t inst, sub;
    uint32_t poison_stat = 0;

    for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
        for (sub = 0; sub < AMDGPU_VCN_V4_0_MAX_SUB_BLOCK; sub++)
            poison_stat +=
                vcn_v4_0_query_poison_by_instance(adev, inst, sub);

    return !!poison_stat;
}

const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
    .query_poison_status = vcn_v4_0_query_ras_poison_status,
};

static struct amdgpu_vcn_ras vcn_v4_0_ras = {
    .ras_block = {
        .hw_ops = &vcn_v4_0_ras_hw_ops,
        .ras_late_init = amdgpu_vcn_ras_late_init,
    },
};

static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
    switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
    case IP_VERSION(4, 0, 0):
        adev->vcn.ras = &vcn_v4_0_ras;