/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_vpe.h"
#include "soc15_common.h"
#include "vpe_v6_1.h"

#define AMDGPU_CSA_VPE_SIZE	64
/* VPE CSA resides in the 4th page of CSA */
#define AMDGPU_CSA_VPE_OFFSET	(4096 * 3)
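/* that page is carved into AMDGPU_CSA_VPE_SIZE-byte slots; only slot 0 is used below */
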
static void vpe_set_ring_funcs(struct amdgpu_device *adev);

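/*
 * Hand the command buffer staged at cmdbuf_gpu_addr to the PSP so it can
 * update the VPE SRAM (the IP-specific load path fills the buffer first).
 */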
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info ucode = {
		.ucode_id = AMDGPU_UCODE_ID_VPE,
		.mc_addr = adev->vpe.cmdbuf_gpu_addr,
		.ucode_size = 8,
	};

	return psp_execute_ip_fw_load(&adev->psp, &ucode);
}

int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	const struct vpe_firmware_header_v1_0 *vpe_hdr;
	char fw_prefix[32], fw_name[64];
	int ret;

	amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix));
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", fw_prefix);

	ret = amdgpu_ucode_request(adev, &adev->vpe.fw, fw_name);
	if (ret)
		goto out;

	vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
	adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version);
	adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version);
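
	/*
	 * The single VPE image carries two blobs, context and control
	 * microcode; with PSP-based loading each is registered separately
	 * so that both get staged.
	 */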
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		struct amdgpu_firmware_info *info;

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTX];
		info->ucode_id = AMDGPU_UCODE_ID_VPE_CTX;
		info->fw = adev->vpe.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTL];
		info->ucode_id = AMDGPU_UCODE_ID_VPE_CTL;
		info->fw = adev->vpe.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
	}

	return 0;
out:
	dev_err(adev->dev, "failed to initialize vpe microcode\n");
	release_firmware(adev->vpe.fw);
	adev->vpe.fw = NULL;
	return ret;
}

int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
	struct amdgpu_ring *ring = &vpe->ring;
	int ret;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->vm_hub = AMDGPU_MMHUB0(0);
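	/* doorbell indices are allocated in 64-bit units; shift to the 32-bit slot */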
	ring->doorbell_index = (adev->doorbell_index.vpe_ring << 1);
	snprintf(ring->name, 4, "vpe");

	ret = amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0,
			       AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (ret)
		return ret;

	return 0;
}

int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe)
{
	amdgpu_ring_fini(&vpe->ring);

	return 0;
}

static int vpe_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;

	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
		vpe_v6_1_set_funcs(vpe);
		break;
	default:
		return -EINVAL;
	}

	vpe_set_ring_funcs(adev);
	vpe_set_regs(vpe);

	return 0;
}

static int vpe_common_init(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
	int r;
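
	/*
	 * One page of GTT shared with the PSP as the microcode-load command
	 * buffer (see amdgpu_vpe_psp_update_sram()).
	 */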
	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vpe.cmdbuf_obj,
				    &adev->vpe.cmdbuf_gpu_addr,
				    (void **)&adev->vpe.cmdbuf_cpu_addr);
	if (r) {
		dev_err(adev->dev, "VPE: failed to allocate cmdbuf bo %d\n", r);
		return r;
	}

	return 0;
}

static int vpe_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;
	int ret;

	ret = vpe_common_init(vpe);
	if (ret)
		goto out;

	ret = vpe_irq_init(vpe);
	if (ret)
		goto out;

	ret = vpe_ring_init(vpe);
	if (ret)
		goto out;

	ret = vpe_init_microcode(vpe);
out:
	return ret;
}

static int vpe_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;

	release_firmware(vpe->fw);
	vpe->fw = NULL;

	vpe_ring_fini(vpe);

	amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj,
			      &adev->vpe.cmdbuf_gpu_addr,
			      (void **)&adev->vpe.cmdbuf_cpu_addr);

	return 0;
}

static int vpe_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;
	int ret;

	ret = vpe_load_microcode(vpe);
	if (ret)
		return ret;

	ret = vpe_ring_start(vpe);
	if (ret)
		return ret;

	return 0;
}

static int vpe_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;

	vpe_ring_stop(vpe);

	return 0;
}

static int vpe_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vpe_hw_fini(adev);
}

static int vpe_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vpe_hw_init(adev);
}

static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	/* already aligned, nothing to insert */
	if (!count)
		return;

	/* one NOP header, then count - 1 padding dwords */
	amdgpu_ring_write(ring, ring->funcs->nop |
				VPE_CMD_NOP_HEADER_COUNT(count - 1));

	for (i = 0; i < count - 1; i++)
		amdgpu_ring_write(ring, 0);
}

static void vpe_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	uint32_t pad_count;
	int i;

	/* pad the IB to a multiple of 8 dwords */
	pad_count = (-ib->length_dw) & 0x7;
	if (!pad_count)
		return;

	ib->ptr[ib->length_dw++] = ring->funcs->nop |
				   VPE_CMD_NOP_HEADER_COUNT(pad_count - 1);

	for (i = 0; i < pad_count - 1; i++)
		ib->ptr[ib->length_dw++] = 0;
}

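/*
 * Return the GPU VA of this ring's slot in the context save area, or 0 when
 * mid-command-buffer preemption is not in play (SR-IOV, kernel submissions
 * with vmid 0, or MCBP disabled).
 */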
static uint64_t vpe_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t index = 0;
	uint64_t csa_mc_addr;

	if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
		return 0;

	csa_mc_addr = amdgpu_csa_vaddr(adev) + AMDGPU_CSA_VPE_OFFSET +
		      index * AMDGPU_CSA_VPE_SIZE;

	return csa_mc_addr;
}

static void vpe_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	uint32_t vmid = AMDGPU_JOB_GET_VMID(job);
	uint64_t csa_mc_addr = vpe_get_csa_mc_addr(ring, vmid);

	/*
	 * The 6-dword IB packet must end on an 8 DW boundary, so pad
	 * until wptr % 8 == 2.
	 */
	vpe_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0) |
				VPE_CMD_INDIRECT_HEADER_VMID(vmid & 0xf));

	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}

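/*
 * Emit a fence packet; for a 64-bit sequence number a second packet writes
 * the high dword at addr + 4. A trailing trap packet raises an interrupt
 * when AMDGPU_FENCE_FLAG_INT is set.
 */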
static void vpe_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr,
				uint64_t seq, unsigned int flags)
{
	int i = 0;

	do {
		/* write the fence */
		amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
		/* zero in first two bits */
		WARN_ON_ONCE(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, i == 0 ? lower_32_bits(seq) : upper_32_bits(seq));
		addr += 4;
	} while ((flags & AMDGPU_FENCE_FLAG_64BIT) && (i++ < 1));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* generate an interrupt */
		amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_TRAP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for the fence of the last submitted job to signal */
	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
				VPE_POLL_REGMEM_SUBOP_REGMEM) |
				VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
				VPE_CMD_POLL_REGMEM_HEADER_MEM(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
				VPE_CMD_POLL_REGMEM_DW5_INTERVAL(4));
}

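/* register packets take byte offsets, hence the reg << 2 below */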
static void vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_REG_WRITE, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

static void vpe_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				   uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
				VPE_POLL_REGMEM_SUBOP_REGMEM) |
				VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
				VPE_CMD_POLL_REGMEM_HEADER_MEM(0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val); /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
				VPE_CMD_POLL_REGMEM_DW5_INTERVAL(10));
}

static void vpe_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid,
				   uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned int ret;

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 1);
	/* this is the offset we need to patch later */
	ret = ring->wptr & ring->buf_mask;
	/* insert a dummy count here and patch it later */
	amdgpu_ring_write(ring, 0x55aa55aa);

	return ret;
}

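/*
 * Replace the 0x55aa55aa placeholder with the number of dwords from the
 * placeholder to the current wptr (wrap-aware), i.e. how much the COND_EXE
 * packet must skip when the condition is not met.
 */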
static void vpe_ring_patch_cond_exec(struct amdgpu_ring *ring, unsigned int offset)
{
	unsigned int cur;

	WARN_ON_ONCE(offset > ring->buf_mask);
	WARN_ON_ONCE(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (cur > offset)
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}

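/*
 * Preemption handshake: disarm cond_exec, emit and commit a trailing fence,
 * assert the queue's preempt register, then poll the trailing fence before
 * releasing the register and re-arming cond_exec.
 */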
static int vpe_ring_preempt_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint32_t preempt_reg = vpe->regs.queue0_preempt;
	int i, r = 0;

	/* assert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* emit the trailing fence */
	ring->trail_seq += 1;
	amdgpu_ring_alloc(ring, 10);
	vpe_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, 0);
	amdgpu_ring_commit(ring);

	/* assert IB preemption */
	WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		dev_err(adev->dev, "ring %d failed to be preempted\n", ring->idx);
	}

	/* deassert IB preemption */
	WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0);

	/* deassert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);

	return r;
}

static int vpe_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int vpe_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static uint64_t vpe_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint64_t rptr;

	if (ring->use_doorbell) {
		rptr = atomic64_read((atomic64_t *)ring->rptr_cpu_addr);
		dev_dbg(adev->dev, "rptr/doorbell before shift == 0x%016llx\n", rptr);
	} else {
		rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi));
		rptr = rptr << 32;
		rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo));
		dev_dbg(adev->dev, "rptr before shift [%i] == 0x%016llx\n", ring->me, rptr);
	}

	return rptr >> 2;
}

static uint64_t vpe_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint64_t wptr;

	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
		dev_dbg(adev->dev, "wptr/doorbell before shift == 0x%016llx\n", wptr);
	} else {
		wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi));
		wptr = wptr << 32;
		wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo));
		dev_dbg(adev->dev, "wptr before shift [%i] == 0x%016llx\n", ring->me, wptr);
	}

	return wptr >> 2;
}

static void vpe_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;

	if (ring->use_doorbell) {
		dev_dbg(adev->dev, "Using doorbell, \
			wptr_offs == 0x%08x, \
			lower_32_bits(ring->wptr) << 2 == 0x%08x, \
			upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
			ring->wptr_offs,
			lower_32_bits(ring->wptr << 2),
			upper_32_bits(ring->wptr << 2));
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	} else {
		dev_dbg(adev->dev, "Not using doorbell, \
			regVPEC_QUEUE0_RB_WPTR == 0x%08x, \
			regVPEC_QUEUE0_RB_WPTR_HI == 0x%08x\n",
			lower_32_bits(ring->wptr << 2),
			upper_32_bits(ring->wptr << 2));
		WREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo),
		       lower_32_bits(ring->wptr << 2));
		WREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi),
		       upper_32_bits(ring->wptr << 2));
	}
}

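/*
 * Smoke test: submit a fence packet that writes a known pattern to a
 * writeback slot, then poll that slot to prove the ring fetches and
 * executes commands.
 */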
static int vpe_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	const uint32_t test_pattern = 0xdeadbeef;
	uint64_t wb_addr;
	uint32_t index, i;
	int ret;

	ret = amdgpu_device_wb_get(adev, &index);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret);
		return ret;
	}

	adev->wb.wb[index] = 0;
	wb_addr = adev->wb.gpu_addr + (index * 4);

	ret = amdgpu_ring_alloc(ring, 4);
	if (ret) {
		dev_err(adev->dev, "failed to lock ring %d (%d).\n", ring->idx, ret);
		goto out;
	}

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
	amdgpu_ring_write(ring, lower_32_bits(wb_addr));
	amdgpu_ring_write(ring, upper_32_bits(wb_addr));
	amdgpu_ring_write(ring, test_pattern);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (le32_to_cpu(adev->wb.wb[index]) == test_pattern)
			goto out;
		udelay(1);
	}

	ret = -ETIMEDOUT;
out:
	amdgpu_device_wb_free(adev, index);

	return ret;
}

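/* the same writeback test, driven through an indirect buffer instead */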
static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	const uint32_t test_pattern = 0xdeadbeef;
	struct amdgpu_ib ib = {};
	struct dma_fence *f = NULL;
	uint64_t wb_addr;
	uint32_t index;
	int ret;

	ret = amdgpu_device_wb_get(adev, &index);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret);
		return ret;
	}

	adev->wb.wb[index] = 0;
	wb_addr = adev->wb.gpu_addr + (index * 4);

	ret = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
	if (ret)
		goto err0;

	ib.ptr[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0);
	ib.ptr[1] = lower_32_bits(wb_addr);
	ib.ptr[2] = upper_32_bits(wb_addr);
	ib.ptr[3] = test_pattern;
	ib.ptr[4] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.ptr[5] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.ptr[6] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.ptr[7] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.length_dw = 8;

	ret = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (ret)
		goto err1;

	ret = dma_fence_wait_timeout(f, false, timeout);
	if (ret <= 0) {
		ret = ret ? : -ETIMEDOUT;
		goto err1;
	}

	ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);

	return ret;
}

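/*
 * The .emit_frame_size sum below reserves the worst-case dword budget one
 * frame's boilerplate (cond_exec, pipeline sync, VM flush, fences) can take
 * in the ring.
 */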
static const struct amdgpu_ring_funcs vpe_ring_funcs = {
	.type = AMDGPU_RING_TYPE_VPE,
	.align_mask = 0xf,
	.nop = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0),
	.support_64bit_ptrs = true,
	.get_rptr = vpe_ring_get_rptr,
	.get_wptr = vpe_ring_get_wptr,
	.set_wptr = vpe_ring_set_wptr,
	.emit_frame_size =
		5 + /* vpe_ring_init_cond_exec */
		6 + /* vpe_ring_emit_pipeline_sync */
		10 + 10 + 10 + /* vpe_ring_emit_fence */
		/* vpe_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6,
	.emit_ib_size = 7 + 6,
	.emit_ib = vpe_ring_emit_ib,
	.emit_pipeline_sync = vpe_ring_emit_pipeline_sync,
	.emit_fence = vpe_ring_emit_fence,
	.emit_vm_flush = vpe_ring_emit_vm_flush,
	.emit_wreg = vpe_ring_emit_wreg,
	.emit_reg_wait = vpe_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.insert_nop = vpe_ring_insert_nop,
	.pad_ib = vpe_ring_pad_ib,
	.test_ring = vpe_ring_test_ring,
	.test_ib = vpe_ring_test_ib,
	.init_cond_exec = vpe_ring_init_cond_exec,
	.patch_cond_exec = vpe_ring_patch_cond_exec,
	.preempt_ib = vpe_ring_preempt_ib,
};

static void vpe_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vpe.ring.funcs = &vpe_ring_funcs;
}

const struct amd_ip_funcs vpe_ip_funcs = {
	.name = "vpe_v6_1",
	.early_init = vpe_early_init,
	.late_init = NULL,
	.sw_init = vpe_sw_init,
	.sw_fini = vpe_sw_fini,
	.hw_init = vpe_hw_init,
	.hw_fini = vpe_hw_fini,
	.suspend = vpe_suspend,
	.resume = vpe_resume,
	.soft_reset = NULL,
	.set_clockgating_state = vpe_set_clockgating_state,
	.set_powergating_state = vpe_set_powergating_state,
};

const struct amdgpu_ip_block_version vpe_v6_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VPE,
	.major = 6,
	.minor = 1,
	.rev = 0,
	.funcs = &vpe_ip_funcs,
};