/*
 * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "vcn_v5_0_0.h"
#include "vcn_v5_0_1.h"

#include <drm/drm_drv.h>
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
		enum amd_powergating_state state);
static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
 * vcn_v5_0_1_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* re-use enc ring as unified ring */
	adev->vcn.num_enc_rings = 1;

	vcn_v5_0_1_set_unified_ring_funcs(adev);
	vcn_v5_0_1_set_irq_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}
/**
 * vcn_v5_0_1_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	/* VCN UNIFIED TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn5_fw_shared *fw_shared;

		vcn_inst = GET_INST(VCN, i);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
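		/*
		 * each physical VCN instance gets its own doorbell slot,
		 * spaced 9 entries apart on top of the shared vcn_ring0_1 base
		 */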
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst;

		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
		sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = true;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	/* TODO: Add queue reset mask when FW fully supports it */
	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);

	vcn_v5_0_0_alloc_ip_dump(adev);

	return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
/**
 * vcn_v5_0_1_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	kfree(adev->vcn.ip_dump);

	return r;
}
/**
 * vcn_v5_0_1_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		vcn_inst = GET_INST(VCN, i);
		ring = &adev->vcn.inst[i].ring_enc[0];

		if (ring->use_doorbell)
			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
				((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				 9 * vcn_inst),
				adev->vcn.inst[i].aid_id);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}
/**
 * vcn_v5_0_1_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	return 0;
}
/**
 * vcn_v5_0_1_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = vcn_v5_0_1_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}
/**
 * vcn_v5_0_1_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v5_0_1_hw_init(ip_block);

	return r;
}
/**
 * vcn_v5_0_1_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t offset, size, vcn_inst;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

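	/*
	 * the logical instance "inst" is translated to the physical register
	 * instance here; all MMIO below uses the physical index
	 */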
	vcn_inst = GET_INST(VCN, inst);
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}
/**
 * vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
 * vcn_v5_0_1_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
}

/**
 * vcn_v5_0_1_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
}
/**
 * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared =
		adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int vcn_inst;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);

	if (indirect) {
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
			(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
		/* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */
		WREG32_SOC24_DPG_MODE(inst_idx, 0xDEADBEEF,
			adev->vcn.inst[inst_idx].aid_id, 0, true);
	}

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);

	vcn_v5_0_1_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

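	/*
	 * in indirect mode nothing has reached the hardware yet: the writes
	 * above were only staged in the DPG scratch buffer, so hand them to
	 * PSP to program into this instance now
	 */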
	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));

	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
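	/*
	 * with RB1 disabled, flag the ring reset to firmware, zero the
	 * pointers, then re-enable RB1 and clear the reset/hold-off flags
	 */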
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);
	/* Read DB_CTRL to flush the write DB_CTRL command. */
	RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);

	return 0;
}
/**
 * vcn_v5_0_1_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v5_0_1_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, k, r, vcn_inst;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v5_0_1_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		vcn_inst = GET_INST(VCN, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		vcn_v5_0_1_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

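		/*
		 * firmware reports boot completion in UVD_STATUS bit 1; poll
		 * for it and retry with a VCPU block reset before giving up
		 */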
		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
				if (amdgpu_emu_mode == 1)
					msleep(1);
			}

			if (amdgpu_emu_mode == 1) {
				r = -1;
				if (status & 2) {
					r = 0;
					break;
				}
			} else {
				r = 0;
				if (status & 2)
					break;

				dev_err(adev->dev,
					"VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
				WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
					UVD_VCPU_CNTL__BLK_RST_MASK,
					~UVD_VCPU_CNTL__BLK_RST_MASK);
				mdelay(10);
				WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
					~UVD_VCPU_CNTL__BLK_RST_MASK);

				mdelay(10);
				r = -1;
			}
		}

		if (r) {
			dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		ring = &adev->vcn.inst[i].ring_enc[0];

		WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
			VCN_RB1_DB_CTRL__EN_MASK);

		/* Read DB_CTRL to flush the write DB_CTRL command. */
		RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);

		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);

		tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
		tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
		WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
		fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);

		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
		ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);

		tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
		tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
		WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
		fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
	}

	return 0;
}
/**
 * vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;
	int vcn_inst;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
}
/**
 * vcn_v5_0_1_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v5_0_1_stop(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	uint32_t tmp;
	int i, r = 0, vcn_inst;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		vcn_inst = GET_INST(VCN, i);

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			vcn_v5_0_1_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}
/**
 * vcn_v5_0_1_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v5_0_1_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
}

/**
 * vcn_v5_0_1_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v5_0_1_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR);
}

/**
 * vcn_v5_0_1_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	}
}
static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v5_0_1_unified_ring_get_rptr,
	.get_wptr = vcn_v5_0_1_unified_ring_get_wptr,
	.set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
/**
 * vcn_v5_0_1_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i, vcn_inst;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_1_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
		vcn_inst = GET_INST(VCN, i);
		adev->vcn.inst[i].aid_id = vcn_inst / adev->vcn.num_inst_per_aid;
	}
}
/**
 * vcn_v5_0_1_is_idle - check VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether VCN block is idle
 */
static bool vcn_v5_0_1_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == UVD_STATUS__IDLE);

	return ret;
}
/**
 * vcn_v5_0_1_wait_for_idle - wait for VCN block idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Wait for VCN block idle
 */
static int vcn_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}
/**
 * vcn_v5_0_1_set_clockgating_state - set VCN block clockgating state
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
		enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (enable) {
			if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v5_0_1_enable_clock_gating(adev, i);
		} else {
			vcn_v5_0_1_disable_clock_gating(adev, i);
		}
	}

	return 0;
}
/**
 * vcn_v5_0_1_set_powergating_state - set VCN block powergating state
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
		enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret = 0;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v5_0_1_stop(adev);
	else
		ret = vcn_v5_0_1_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}
/**
 * vcn_v5_0_1_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	uint32_t i, inst;

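	/*
	 * the IV entry carries the node id of the reporting AID; translate it
	 * to the physical AID index, then find the VCN instance on that AID
	 */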
	i = node_id_to_phys_map[entry->node_id];

	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");

	for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
		if (adev->vcn.inst[inst].aid_id == i)
			break;
	if (inst >= adev->vcn.num_vcn_inst) {
		dev_WARN_ONCE(adev->dev, 1,
			"Interrupt received for unknown VCN instance %d",
			entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = {
	.process = vcn_v5_0_1_process_interrupt,
};
/**
 * vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

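	/*
	 * all VCN instances share the single IRQ source in inst[0];
	 * register one interrupt type per instance on it
	 */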
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		adev->vcn.inst->irq.num_types++;
	adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs;
}
static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
	.name = "vcn_v5_0_1",
	.early_init = vcn_v5_0_1_early_init,
	.late_init = NULL,
	.sw_init = vcn_v5_0_1_sw_init,
	.sw_fini = vcn_v5_0_1_sw_fini,
	.hw_init = vcn_v5_0_1_hw_init,
	.hw_fini = vcn_v5_0_1_hw_fini,
	.suspend = vcn_v5_0_1_suspend,
	.resume = vcn_v5_0_1_resume,
	.is_idle = vcn_v5_0_1_is_idle,
	.wait_for_idle = vcn_v5_0_1_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
	.set_powergating_state = vcn_v5_0_1_set_powergating_state,
	.dump_ip_state = vcn_v5_0_0_dump_ip_state,
	.print_ip_state = vcn_v5_0_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 5,
	.minor = 0,
	.rev = 1,
	.funcs = &vcn_v5_0_1_ip_funcs,
};