/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_vpe.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "vpe_v6_1.h"
#define AMDGPU_CSA_VPE_SIZE	64
/* VPE CSA resides in the 4th page of CSA */
#define AMDGPU_CSA_VPE_OFFSET	(4096 * 3)

/* 1 second timeout */
#define VPE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

#define VPE_MAX_DPM_LEVEL			4
#define FIXED1_8_BITS_PER_FRACTIONAL_PART	8
#define GET_PRATIO_INTEGER_PART(x)		((x) >> FIXED1_8_BITS_PER_FRACTIONAL_PART)
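
/*
 * Power ratios are kept in U1.8 fixed point: 1 integer bit plus 8
 * fractional bits. For example, a clock ratio of 300/200 = 1.5 encodes
 * as 1.5 * 256 = 0x180, whose integer part (0x180 >> 8) is 1.
 */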
static void vpe_set_ring_funcs(struct amdgpu_device *adev);
static inline uint16_t div16_u16_rem(uint16_t dividend, uint16_t divisor, uint16_t *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
static inline uint16_t complete_integer_division_u16(
	uint16_t dividend,
	uint16_t divisor,
	uint16_t *remainder)
{
	return div16_u16_rem(dividend, divisor, remainder);
}
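
/*
 * Convert numerator/denominator to U1.8 fixed point by long division:
 * the integer part comes from one u16 division, then each fractional
 * bit is produced by doubling the remainder, with a final
 * round-to-nearest applied to the LSB.
 */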
static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator)
{
	u16 arg1_value = numerator;
	u16 arg2_value = denominator;
	uint16_t remainder;

	/* determine integer part */
	uint16_t res_value = complete_integer_division_u16(
		arg1_value, arg2_value, &remainder);

	if (res_value > 127 /* CHAR_MAX */)
		return 0;

	/* determine fractional part */
	unsigned int i = FIXED1_8_BITS_PER_FRACTIONAL_PART;

	do {
		remainder <<= 1;
		res_value <<= 1;

		if (remainder >= arg2_value) {
			res_value |= 1;
			remainder -= arg2_value;
		}
	} while (--i != 0);

	/* round up the LSB */
	uint16_t summand = (remainder << 1) >= arg2_value;

	if ((res_value + summand) > 32767 /* SHRT_MAX */)
		return 0;

	return res_value + summand;
}
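
/*
 * A ratio whose integer part exceeds 1 cannot be represented in U1.8,
 * so it is collapsed to 0 and treated as invalid by the caller.
 */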
static uint16_t vpe_internal_get_pratio(uint16_t from_frequency, uint16_t to_frequency)
{
	uint16_t pratio = vpe_u1_8_from_fraction(from_frequency, to_frequency);

	if (GET_PRATIO_INTEGER_PART(pratio) > 1)
		pratio = 0;

	return pratio;
}
/*
 * VPE has 4 DPM levels from level 0 (lowest) to 3 (highest),
 * and the VPE FW dynamically decides which level to use based on the current load.
 *
 * Get VPE and SOC clocks from PM, select the appropriate four clock values,
 * and calculate the ratios for adjusting from one clock to another.
 * The VPE FW can then request the appropriate frequency from the PMFW.
 */
int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	uint32_t dpm_ctl;

	if (adev->pm.dpm_enabled) {
		struct dpm_clocks clock_table = { 0 };
		struct dpm_clock *VPEClks;
		struct dpm_clock *SOCClks;
		uint32_t idx;
		uint32_t pratio_vmax_vnorm = 0, pratio_vnorm_vmid = 0, pratio_vmid_vmin = 0;
		uint16_t pratio_vmin_freq = 0, pratio_vmid_freq = 0, pratio_vnorm_freq = 0, pratio_vmax_freq = 0;

		dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
		dpm_ctl |= 1; /* DPM enablement */
		WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);

		/* Get VPECLK and SOCCLK */
		if (amdgpu_dpm_get_dpm_clock_table(adev, &clock_table)) {
			dev_dbg(adev->dev, "%s: get clock failed!\n", __func__);
			goto disable_dpm;
		}

		SOCClks = clock_table.SocClocks;
		VPEClks = clock_table.VPEClocks;

		/* vpe dpm only cares about 4 levels. */
		for (idx = 0; idx < VPE_MAX_DPM_LEVEL; idx++) {
			uint32_t soc_dpm_level;
			uint32_t min_freq;

			if (idx == 0)
				soc_dpm_level = 0;
			else
				soc_dpm_level = (idx * 2) + 1;

			/* clamp the max level */
			if (soc_dpm_level > PP_SMU_NUM_VPECLK_DPM_LEVELS - 1)
				soc_dpm_level = PP_SMU_NUM_VPECLK_DPM_LEVELS - 1;

			min_freq = (SOCClks[soc_dpm_level].Freq < VPEClks[soc_dpm_level].Freq) ?
				   SOCClks[soc_dpm_level].Freq : VPEClks[soc_dpm_level].Freq;

			switch (idx) {
			case 0:
				pratio_vmin_freq = min_freq;
				break;
			case 1:
				pratio_vmid_freq = min_freq;
				break;
			case 2:
				pratio_vnorm_freq = min_freq;
				break;
			case 3:
				pratio_vmax_freq = min_freq;
				break;
			default:
				break;
			}
		}

		if (pratio_vmin_freq && pratio_vmid_freq && pratio_vnorm_freq && pratio_vmax_freq) {
			uint32_t pratio_ctl;

			pratio_vmax_vnorm = (uint32_t)vpe_internal_get_pratio(pratio_vmax_freq, pratio_vnorm_freq);
			pratio_vnorm_vmid = (uint32_t)vpe_internal_get_pratio(pratio_vnorm_freq, pratio_vmid_freq);
			pratio_vmid_vmin = (uint32_t)vpe_internal_get_pratio(pratio_vmid_freq, pratio_vmin_freq);

			/* pack the three U1.8 ratios into one register, 9 bits each */
			pratio_ctl = pratio_vmax_vnorm | (pratio_vnorm_vmid << 9) | (pratio_vmid_vmin << 18);
			WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl);		/* PRatio */
			WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000);	/* 1ms, unit=1/24MHz */
			WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000);	/* 50ms */
			WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */
			WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */
			dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__);
		} else {
			dev_dbg(adev->dev, "%s: invalid pratio parameters!\n", __func__);
			goto disable_dpm;
		}
	}
	return 0;

disable_dpm:
	dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
	dpm_ctl &= 0xfffffffe; /* Disable DPM */
	WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
	dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
	return -EINVAL;
}
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info ucode = {
		.ucode_id = AMDGPU_UCODE_ID_VPE,
		.mc_addr = adev->vpe.cmdbuf_gpu_addr,
	};

	return psp_execute_ip_fw_load(&adev->psp, &ucode);
}
int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	const struct vpe_firmware_header_v1_0 *vpe_hdr;
	char fw_prefix[32], fw_name[64];
	int ret;

	amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix));
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", fw_prefix);

	ret = amdgpu_ucode_request(adev, &adev->vpe.fw, fw_name);
	if (ret)
		goto out;

	vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
	adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version);
	adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		struct amdgpu_firmware_info *info;

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTX];
		info->ucode_id = AMDGPU_UCODE_ID_VPE_CTX;
		info->fw = adev->vpe.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTL];
		info->ucode_id = AMDGPU_UCODE_ID_VPE_CTL;
		info->fw = adev->vpe.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
	}

	return 0;
out:
	dev_err(adev->dev, "failed to initialize vpe microcode\n");
	release_firmware(adev->vpe.fw);
	adev->vpe.fw = NULL;
	return ret;
}
int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
	struct amdgpu_ring *ring = &vpe->ring;
	int ret;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->vm_hub = AMDGPU_MMHUB0(0);
	ring->doorbell_index = (adev->doorbell_index.vpe_ring << 1);
	snprintf(ring->name, 4, "vpe");

	ret = amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0,
			       AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (ret)
		return ret;

	return 0;
}
int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe)
{
	amdgpu_ring_fini(&vpe->ring);

	return 0;
}
static int vpe_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;

	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
		vpe_v6_1_set_funcs(vpe);
		break;
	case IP_VERSION(6, 1, 1):
		vpe_v6_1_set_funcs(vpe);
		vpe->collaborate_mode = true;
		break;
	default:
		return -EINVAL;
	}

	vpe_set_ring_funcs(adev);

	dev_info(adev->dev, "VPE: collaborate mode %s", vpe->collaborate_mode ? "true" : "false");

	return 0;
}
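
/*
 * Idle worker: once every fence emitted on the VPE ring has signaled,
 * gate the block; otherwise check again after VPE_IDLE_TIMEOUT.
 */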
static void vpe_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vpe.idle_work.work);
	unsigned int fences = 0;

	fences += amdgpu_fence_count_emitted(&adev->vpe.ring);

	if (fences == 0)
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
	else
		schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
static int vpe_common_init(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
	int r;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vpe.cmdbuf_obj,
				    &adev->vpe.cmdbuf_gpu_addr,
				    (void **)&adev->vpe.cmdbuf_cpu_addr);
	if (r) {
		dev_err(adev->dev, "VPE: failed to allocate cmdbuf bo %d\n", r);
		return r;
	}

	vpe->context_started = false;
	INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler);

	return 0;
}
static int vpe_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;
	int ret;

	ret = vpe_common_init(vpe);
	if (ret)
		goto out;

	ret = vpe_irq_init(vpe);
	if (ret)
		goto out;

	ret = vpe_ring_init(vpe);
	if (ret)
		goto out;

	ret = vpe_init_microcode(vpe);
out:
	return ret;
}
static int vpe_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;

	release_firmware(vpe->fw);
	vpe->fw = NULL;

	vpe_ring_fini(vpe);

	amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj,
			      &adev->vpe.cmdbuf_gpu_addr,
			      (void **)&adev->vpe.cmdbuf_cpu_addr);

	return 0;
}
static int vpe_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;
	int ret;

	ret = vpe_load_microcode(vpe);
	if (ret)
		return ret;

	ret = vpe_ring_start(vpe);
	if (ret)
		return ret;

	return 0;
}
static int vpe_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;

	vpe_ring_stop(vpe);

	/* Power off VPE */
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);

	return 0;
}
static int vpe_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vpe.idle_work);

	return vpe_hw_fini(adev);
}
static int vpe_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vpe_hw_init(adev);
}
static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		if (i == 0)
			amdgpu_ring_write(ring, ring->funcs->nop |
					  VPE_CMD_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}
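
/*
 * The VPE context-save area lives at AMDGPU_CSA_VPE_OFFSET inside the
 * per-VM CSA. Returns 0 (no CSA) under SR-IOV, for VMID 0, or when
 * MCBP is disabled.
 */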
static uint64_t vpe_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t index = 0;
	uint64_t csa_mc_addr;

	if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
		return 0;

	csa_mc_addr = amdgpu_csa_vaddr(adev) + AMDGPU_CSA_VPE_OFFSET +
		      index * AMDGPU_CSA_VPE_SIZE;

	return csa_mc_addr;
}
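
/*
 * Predicated execution only applies in collaborate mode: the header
 * selects which VPE instance runs the following exec_count dwords.
 */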
static void vpe_ring_emit_pred_exec(struct amdgpu_ring *ring,
				    uint32_t device_select,
				    uint32_t exec_count)
{
	if (!ring->adev->vpe.collaborate_mode)
		return;

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_PRED_EXE, 0) |
				(device_select << 16));
	amdgpu_ring_write(ring, exec_count & 0x1fff);
}
static void vpe_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	uint32_t vmid = AMDGPU_JOB_GET_VMID(job);
	uint64_t csa_mc_addr = vpe_get_csa_mc_addr(ring, vmid);

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0) |
				VPE_CMD_INDIRECT_HEADER_VMID(vmid & 0xf));

	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}
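
/*
 * A 64-bit fence is emitted as two back-to-back 32-bit FENCE packets:
 * the first writes the lower half of the sequence number, the second
 * (at addr + 4) writes the upper half.
 */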
static void vpe_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr,
				uint64_t seq, unsigned int flags)
{
	int i = 0;

	do {
		/* write the fence */
		amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
		/* zero in first two bits */
		WARN_ON_ONCE(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, i == 0 ? lower_32_bits(seq) : upper_32_bits(seq));
		addr += 4;
	} while ((flags & AMDGPU_FENCE_FLAG_64BIT) && (i++ < 1));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* generate an interrupt */
		amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_TRAP, 0));
		amdgpu_ring_write(ring, 0);
	}
}
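
/*
 * Pipeline sync: poll the fence memory until the last emitted sequence
 * number appears, i.e. until all prior work on this ring has retired.
 */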
static void vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	vpe_ring_emit_pred_exec(ring, 0, 6);

	/* wait for idle */
	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
				VPE_POLL_REGMEM_SUBOP_REGMEM) |
				VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
				VPE_CMD_POLL_REGMEM_HEADER_MEM(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
				VPE_CMD_POLL_REGMEM_DW5_INTERVAL(4));
}
static void vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	vpe_ring_emit_pred_exec(ring, 0, 3);

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_REG_WRITE, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
static void vpe_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				   uint32_t val, uint32_t mask)
{
	vpe_ring_emit_pred_exec(ring, 0, 6);

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
				VPE_POLL_REGMEM_SUBOP_REGMEM) |
				VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
				VPE_CMD_POLL_REGMEM_HEADER_MEM(0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val); /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
				VPE_CMD_POLL_REGMEM_DW5_INTERVAL(10));
}
static void vpe_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid,
				   uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}
static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring,
					    uint64_t addr)
{
	unsigned int ret;

	if (ring->adev->vpe.collaborate_mode)
		return ~0;

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, 1);
	/* this is the offset we need to patch later */
	ret = ring->wptr & ring->buf_mask;
	/* insert a dummy dword here and patch it later */
	amdgpu_ring_write(ring, 0);

	return ret;
}
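
/*
 * Mid-command-buffer preemption: disable the COND_EXE predicate, emit a
 * trailing fence, assert the queue preempt register, then wait for the
 * trailing fence to signal before releasing everything again.
 */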
static int vpe_ring_preempt_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint32_t preempt_reg = vpe->regs.queue0_preempt;
	int i, r = 0;

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* emit the trailing fence */
	ring->trail_seq += 1;
	amdgpu_ring_alloc(ring, 10);
	vpe_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, 0);
	amdgpu_ring_commit(ring);

	/* assert IB preemption */
	WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		dev_err(adev->dev, "ring %d failed to be preempted\n", ring->idx);
	}

	/* deassert IB preemption */
	WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0);

	/* deassert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);

	return r;
}
static int vpe_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}
static int vpe_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;

	if (!adev->pm.dpm_enabled)
		dev_err(adev->dev, "Without PM, cannot support powergating\n");

	dev_dbg(adev->dev, "%s: %s!\n", __func__, (state == AMD_PG_STATE_GATE) ? "GATE" : "UNGATE");

	if (state == AMD_PG_STATE_GATE) {
		amdgpu_dpm_enable_vpe(adev, false);
		vpe->context_started = false;
	} else {
		amdgpu_dpm_enable_vpe(adev, true);
	}

	return 0;
}
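
/*
 * The doorbell and RB pointer registers hold byte offsets, while
 * ring->rptr/wptr count dwords, hence the >> 2 / << 2 conversions in
 * the accessors below.
 */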
static uint64_t vpe_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint64_t rptr;

	if (ring->use_doorbell) {
		rptr = atomic64_read((atomic64_t *)ring->rptr_cpu_addr);
		dev_dbg(adev->dev, "rptr/doorbell before shift == 0x%016llx\n", rptr);
	} else {
		rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi));
		rptr = rptr << 32;
		rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo));
		dev_dbg(adev->dev, "rptr before shift [%i] == 0x%016llx\n", ring->me, rptr);
	}

	return rptr >> 2;
}
static uint64_t vpe_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint64_t wptr;

	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
		dev_dbg(adev->dev, "wptr/doorbell before shift == 0x%016llx\n", wptr);
	} else {
		wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi));
		wptr = wptr << 32;
		wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo));
		dev_dbg(adev->dev, "wptr before shift [%i] == 0x%016llx\n", ring->me, wptr);
	}

	return wptr >> 2;
}
static void vpe_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;

	if (ring->use_doorbell) {
		dev_dbg(adev->dev, "Using doorbell, "
			"wptr_offs == 0x%08x, "
			"lower_32_bits(ring->wptr) << 2 == 0x%08x, "
			"upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
			ring->wptr_offs,
			lower_32_bits(ring->wptr << 2),
			upper_32_bits(ring->wptr << 2));
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
		if (vpe->collaborate_mode)
			WDOORBELL64(ring->doorbell_index + 4, ring->wptr << 2);
	} else {
		int i;

		for (i = 0; i < vpe->num_instances; i++) {
			dev_dbg(adev->dev, "Not using doorbell, "
				"regVPEC_QUEUE0_RB_WPTR == 0x%08x, "
				"regVPEC_QUEUE0_RB_WPTR_HI == 0x%08x\n",
				lower_32_bits(ring->wptr << 2),
				upper_32_bits(ring->wptr << 2));
			WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo),
			       lower_32_bits(ring->wptr << 2));
			WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi),
			       upper_32_bits(ring->wptr << 2));
		}
	}
}
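
/*
 * Ring test: emit a FENCE packet that writes 0xdeadbeef to a writeback
 * slot, then poll the slot until the pattern shows up or we time out.
 */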
static int vpe_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	const uint32_t test_pattern = 0xdeadbeef;
	uint32_t index, i;
	uint64_t wb_addr;
	int ret;

	ret = amdgpu_device_wb_get(adev, &index);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret);
		return ret;
	}

	adev->wb.wb[index] = 0;
	wb_addr = adev->wb.gpu_addr + (index * 4);

	ret = amdgpu_ring_alloc(ring, 4);
	if (ret) {
		dev_err(adev->dev, "amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, ret);
		goto out;
	}

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
	amdgpu_ring_write(ring, lower_32_bits(wb_addr));
	amdgpu_ring_write(ring, upper_32_bits(wb_addr));
	amdgpu_ring_write(ring, test_pattern);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (le32_to_cpu(adev->wb.wb[index]) == test_pattern)
			goto out;
		udelay(1);
	}

	ret = -ETIMEDOUT;
out:
	amdgpu_device_wb_free(adev, index);

	return ret;
}
static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	const uint32_t test_pattern = 0xdeadbeef;
	struct amdgpu_ib ib = {};
	struct dma_fence *f = NULL;
	uint32_t index;
	uint64_t wb_addr;
	int ret;

	ret = amdgpu_device_wb_get(adev, &index);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret);
		return ret;
	}

	adev->wb.wb[index] = 0;
	wb_addr = adev->wb.gpu_addr + (index * 4);

	ret = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
	if (ret)
		goto err0;

	ib.ptr[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0);
	ib.ptr[1] = lower_32_bits(wb_addr);
	ib.ptr[2] = upper_32_bits(wb_addr);
	ib.ptr[3] = test_pattern;
	ib.ptr[4] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.ptr[5] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.ptr[6] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.ptr[7] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.length_dw = 8;

	ret = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (ret)
		goto err1;

	ret = dma_fence_wait_timeout(f, false, timeout);
	if (ret <= 0) {
		ret = ret ? : -ETIMEDOUT;
		goto err1;
	}

	ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);

	return ret;
}
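
/*
 * begin_use cancels any pending idle work and, on the first submission
 * after power-up, toggles the context-indicator bit so the firmware
 * treats the next job as coming from a new context.
 */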
static void vpe_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;

	cancel_delayed_work_sync(&adev->vpe.idle_work);

	/* Power on VPE and notify VPE of new context */
	if (!vpe->context_started) {
		uint32_t context_notify;

		/* Power on VPE */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_UNGATE);

		/* Indicates that a job from a new context has been submitted. */
		context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator));
		if ((context_notify & 0x1) == 0)
			context_notify |= 0x1;
		else
			context_notify &= ~(0x1);
		WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify);
		vpe->context_started = true;
	}
}
static void vpe_ring_end_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
static const struct amdgpu_ring_funcs vpe_ring_funcs = {
	.type = AMDGPU_RING_TYPE_VPE,
	.align_mask = 0xf,
	.nop = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0),
	.support_64bit_ptrs = true,
	.get_rptr = vpe_ring_get_rptr,
	.get_wptr = vpe_ring_get_wptr,
	.set_wptr = vpe_ring_set_wptr,
	.emit_frame_size =
		5 + /* vpe_ring_init_cond_exec */
		6 + /* vpe_ring_emit_pipeline_sync */
		10 + 10 + 10 + /* vpe_ring_emit_fence */
		/* vpe_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6,
	.emit_ib_size = 7 + 6,
	.emit_ib = vpe_ring_emit_ib,
	.emit_pipeline_sync = vpe_ring_emit_pipeline_sync,
	.emit_fence = vpe_ring_emit_fence,
	.emit_vm_flush = vpe_ring_emit_vm_flush,
	.emit_wreg = vpe_ring_emit_wreg,
	.emit_reg_wait = vpe_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.insert_nop = vpe_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.test_ring = vpe_ring_test_ring,
	.test_ib = vpe_ring_test_ib,
	.init_cond_exec = vpe_ring_init_cond_exec,
	.preempt_ib = vpe_ring_preempt_ib,
	.begin_use = vpe_ring_begin_use,
	.end_use = vpe_ring_end_use,
};
static void vpe_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vpe.ring.funcs = &vpe_ring_funcs;
}
const struct amd_ip_funcs vpe_ip_funcs = {
	.name = "vpe_v6_1",
	.early_init = vpe_early_init,
	.late_init = NULL,
	.sw_init = vpe_sw_init,
	.sw_fini = vpe_sw_fini,
	.hw_init = vpe_hw_init,
	.hw_fini = vpe_hw_fini,
	.suspend = vpe_suspend,
	.resume = vpe_resume,
	.soft_reset = NULL,
	.set_clockgating_state = vpe_set_clockgating_state,
	.set_powergating_state = vpe_set_powergating_state,
};
const struct amdgpu_ip_block_version vpe_v6_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VPE,
	.major = 6,
	.minor = 1,
	.rev = 0,
	.funcs = &vpe_ip_funcs,
};