/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_vpe.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "vpe_v6_1.h"

#define AMDGPU_CSA_VPE_SIZE	64
/* VPE CSA resides in the 4th page of CSA */
#define AMDGPU_CSA_VPE_OFFSET	(4096 * 3)

/* 1 second timeout */
#define VPE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

#define VPE_MAX_DPM_LEVEL			4
#define FIXED1_8_BITS_PER_FRACTIONAL_PART	8
#define GET_PRATIO_INTEGER_PART(x)		((x) >> FIXED1_8_BITS_PER_FRACTIONAL_PART)

static void vpe_set_ring_funcs(struct amdgpu_device *adev);

static inline uint16_t div16_u16_rem(uint16_t dividend, uint16_t divisor, uint16_t *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

static inline uint16_t complete_integer_division_u16(
	uint16_t dividend,
	uint16_t divisor,
	uint16_t *remainder)
{
	return div16_u16_rem(dividend, divisor, remainder);
}

static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator)
{
	uint16_t arg1_value = numerator;
	uint16_t arg2_value = denominator;
	uint16_t remainder;

	/* determine integer part */
	uint16_t res_value = complete_integer_division_u16(
		arg1_value, arg2_value, &remainder);

	if (res_value > 127 /* CHAR_MAX */)
		return 0;

	/* determine fractional part */
	{
		unsigned int i = FIXED1_8_BITS_PER_FRACTIONAL_PART;

		do {
			remainder <<= 1;
			res_value <<= 1;

			if (remainder >= arg2_value) {
				res_value |= 1;
				remainder -= arg2_value;
			}
		} while (--i != 0);
	}

	/* round up LSB */
	{
		uint16_t summand = (remainder << 1) >= arg2_value;

		if ((res_value + summand) > 32767 /* SHRT_MAX */)
			return 0;

		res_value += summand;
	}

	return res_value;
}
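
/*
 * Worked example: vpe_u1_8_from_fraction(3, 2) yields integer part 1 with
 * remainder 1, then shifts in 8 fractional bits for 0x80, giving 0x180
 * (1.5 in unsigned 1.8 fixed point). GET_PRATIO_INTEGER_PART(0x180) == 1,
 * so vpe_internal_get_pratio() below accepts ratios up to just under 2.0.
 */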

static uint16_t vpe_internal_get_pratio(uint16_t from_frequency, uint16_t to_frequency)
{
	uint16_t pratio = vpe_u1_8_from_fraction(from_frequency, to_frequency);

	if (GET_PRATIO_INTEGER_PART(pratio) > 1)
		pratio = 0;

	return pratio;
}

/*
 * VPE has 4 DPM levels, from level 0 (lowest) to 3 (highest).
 * The VPE FW will dynamically decide which level to use according to
 * the current load.
 *
 * Get VPE and SOC clocks from PM, select the appropriate four clock
 * values, and calculate the ratios for adjusting from one clock to
 * another. The VPE FW can then request the appropriate frequency from
 * the PMFW.
 */
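/*
 * Example mapping, assuming enough enabled clock levels: VPE levels 0..3
 * sample clock-table levels 0, 3, 5 and 7 (idx * 2 + 1, clamped to the
 * highest enabled level), take min(SOCCLK, VPECLK) at each as
 * vmin/vmid/vnorm/vmax, and pack the three adjacent-level ratios
 * (vmax/vnorm, vnorm/vmid, vmid/vmin) into one register as 9-bit
 * unsigned 1.8 fields.
 */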
int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	uint32_t dpm_ctl;

	if (adev->pm.dpm_enabled) {
		struct dpm_clocks clock_table = { 0 };
		struct dpm_clock *VPEClks;
		struct dpm_clock *SOCClks;
		uint32_t idx;
		uint32_t vpeclk_enabled_num = 0;
		uint32_t pratio_vmax_vnorm = 0, pratio_vnorm_vmid = 0, pratio_vmid_vmin = 0;
		uint16_t pratio_vmin_freq = 0, pratio_vmid_freq = 0, pratio_vnorm_freq = 0, pratio_vmax_freq = 0;

		dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
		dpm_ctl |= 1; /* DPM enablement */
		WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);

		/* Get VPECLK and SOCCLK */
		if (amdgpu_dpm_get_dpm_clock_table(adev, &clock_table)) {
			dev_dbg(adev->dev, "%s: get clock failed!\n", __func__);
			goto disable_dpm;
		}

		SOCClks = clock_table.SocClocks;
		VPEClks = clock_table.VPEClocks;

		/* Confirm the number of enabled VPE clocks:
		 * enabled VPE clocks are ordered from low to high in VPEClks,
		 * so the highest valid clock index + 1 is the count.
		 */
		for (idx = PP_SMU_NUM_VPECLK_DPM_LEVELS; idx && !vpeclk_enabled_num; idx--)
			if (VPEClks[idx - 1].Freq)
				vpeclk_enabled_num = idx;

		/* vpe dpm only cares about 4 levels. */
		for (idx = 0; idx < VPE_MAX_DPM_LEVEL; idx++) {
			uint32_t soc_dpm_level;
			uint32_t min_freq;

			if (idx == 0)
				soc_dpm_level = 0;
			else
				soc_dpm_level = (idx * 2) + 1;

			/* clamp the max level */
			if (soc_dpm_level > vpeclk_enabled_num - 1)
				soc_dpm_level = vpeclk_enabled_num - 1;

			min_freq = (SOCClks[soc_dpm_level].Freq < VPEClks[soc_dpm_level].Freq) ?
				   SOCClks[soc_dpm_level].Freq : VPEClks[soc_dpm_level].Freq;

			switch (idx) {
			case 0: pratio_vmin_freq = min_freq; break;
			case 1: pratio_vmid_freq = min_freq; break;
			case 2: pratio_vnorm_freq = min_freq; break;
			case 3: pratio_vmax_freq = min_freq; break;
			default: break;
			}
		}

		if (pratio_vmin_freq && pratio_vmid_freq && pratio_vnorm_freq && pratio_vmax_freq) {
			uint32_t pratio_ctl;

			pratio_vmax_vnorm = (uint32_t)vpe_internal_get_pratio(pratio_vmax_freq, pratio_vnorm_freq);
			pratio_vnorm_vmid = (uint32_t)vpe_internal_get_pratio(pratio_vnorm_freq, pratio_vmid_freq);
			pratio_vmid_vmin = (uint32_t)vpe_internal_get_pratio(pratio_vmid_freq, pratio_vmin_freq);

			pratio_ctl = pratio_vmax_vnorm | (pratio_vnorm_vmid << 9) | (pratio_vmid_vmin << 18);
			WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl);		  /* PRatio */
			WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000);	  /* 1ms, unit=1/24MHz */
			WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000);	  /* 50ms */
			WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000); /* 50ms */
			WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000); /* 50ms */
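			/*
			 * The three thresholds above count ticks of the same
			 * 24 MHz reference as the request interval:
			 * 24000 ticks = 1 ms, 1200000 ticks = 50 ms.
			 */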
			dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__);
		} else {
			dev_dbg(adev->dev, "%s: invalid pratio parameters!\n", __func__);
			goto disable_dpm;
		}
	}

	return 0;

disable_dpm:
	dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
	dpm_ctl &= 0xfffffffe; /* Disable DPM */
	WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
	dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);

	return -EINVAL;
}

int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info ucode = {
		.ucode_id = AMDGPU_UCODE_ID_VPE,
		.mc_addr = adev->vpe.cmdbuf_gpu_addr,
		.ucode_size = 8,
	};

	return psp_execute_ip_fw_load(&adev->psp, &ucode);
}

int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	const struct vpe_firmware_header_v1_0 *vpe_hdr;
	char fw_prefix[32];
	int ret;

	amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix));
	ret = amdgpu_ucode_request(adev, &adev->vpe.fw, "amdgpu/%s.bin", fw_prefix);
	if (ret)
		goto out;

	vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
	adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version);
	adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		struct amdgpu_firmware_info *info;

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTX];
		info->ucode_id = AMDGPU_UCODE_ID_VPE_CTX;
		info->fw = adev->vpe.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTL];
		info->ucode_id = AMDGPU_UCODE_ID_VPE_CTL;
		info->fw = adev->vpe.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
	}

	return 0;
out:
	dev_err(adev->dev, "failed to initialize vpe microcode\n");
	release_firmware(adev->vpe.fw);
	adev->vpe.fw = NULL;
	return ret;
}

int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
	struct amdgpu_ring *ring = &vpe->ring;
	int ret;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->vm_hub = AMDGPU_MMHUB0(0);
	ring->doorbell_index = (adev->doorbell_index.vpe_ring << 1);
	snprintf(ring->name, 4, "vpe");

	ret = amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0,
			       AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (ret)
		return ret;

	return 0;
}

int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe)
{
	amdgpu_ring_fini(&vpe->ring);

	return 0;
}

static int vpe_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;

	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 3):
		vpe_v6_1_set_funcs(vpe);
		break;
	case IP_VERSION(6, 1, 1):
		vpe_v6_1_set_funcs(vpe);
		vpe->collaborate_mode = true;
		break;
	default:
		return -EINVAL;
	}

	vpe_set_ring_funcs(adev);
	vpe_set_regs(vpe);

	dev_info(adev->dev, "VPE: collaborate mode %s\n", vpe->collaborate_mode ? "true" : "false");

	return 0;
}

static void vpe_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vpe.idle_work.work);
	unsigned int fences = 0;

	fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
	if (fences == 0)
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
	else
		schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}

static int vpe_common_init(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
	int r;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vpe.cmdbuf_obj,
				    &adev->vpe.cmdbuf_gpu_addr,
				    (void **)&adev->vpe.cmdbuf_cpu_addr);
	if (r) {
		dev_err(adev->dev, "VPE: failed to allocate cmdbuf bo %d\n", r);
		return r;
	}

	vpe->context_started = false;
	INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler);

	return 0;
}

static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	int ret;

	ret = vpe_common_init(vpe);
	if (ret)
		goto out;

	ret = vpe_irq_init(vpe);
	if (ret)
		goto out;

	ret = vpe_ring_init(vpe);
	if (ret)
		goto out;

	ret = vpe_init_microcode(vpe);
	if (ret)
		goto out;

	/* TODO: Add queue reset mask when FW fully supports it */
	adev->vpe.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
	ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
out:
	return ret;
}

static int vpe_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;

	release_firmware(vpe->fw);
	vpe->fw = NULL;

	amdgpu_vpe_sysfs_reset_mask_fini(adev);
	vpe_ring_fini(vpe);

	amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj,
			      &adev->vpe.cmdbuf_gpu_addr,
			      (void **)&adev->vpe.cmdbuf_cpu_addr);

	return 0;
}

static int vpe_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	int ret;

	/* Power on VPE */
	ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
						     AMD_PG_STATE_UNGATE);
	if (ret)
		return ret;

	ret = vpe_load_microcode(vpe);
	if (ret)
		return ret;

	ret = vpe_ring_start(vpe);
	if (ret)
		return ret;

	return 0;
}

static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;

	vpe_ring_stop(vpe);

	/* Power off VPE */
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);

	return 0;
}

static int vpe_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->vpe.idle_work);

	return vpe_hw_fini(ip_block);
}

static int vpe_resume(struct amdgpu_ip_block *ip_block)
{
	return vpe_hw_init(ip_block);
}

static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		if (i == 0)
			amdgpu_ring_write(ring, ring->funcs->nop |
					  VPE_CMD_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}
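
/*
 * The VPE context-save area lives in the fourth 4 KiB page of the CSA
 * (AMDGPU_CSA_VPE_OFFSET == 4096 * 3) and is carved into 64-byte slots
 * (AMDGPU_CSA_VPE_SIZE). The address is only meaningful when mid-command-
 * buffer preemption is possible, i.e. on bare metal with mcbp enabled and
 * a real VMID attached to the job.
 */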

static uint64_t vpe_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t index = 0;
	uint64_t csa_mc_addr;

	if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
		return 0;

	csa_mc_addr = amdgpu_csa_vaddr(adev) + AMDGPU_CSA_VPE_OFFSET +
		      index * AMDGPU_CSA_VPE_SIZE;

	return csa_mc_addr;
}

static void vpe_ring_emit_pred_exec(struct amdgpu_ring *ring,
				    uint32_t device_select,
				    uint32_t exec_count)
{
	if (!ring->adev->vpe.collaborate_mode)
		return;

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_PRED_EXE, 0) |
				(device_select << 16));
	amdgpu_ring_write(ring, exec_count & 0x1fff);
}

static void vpe_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	uint32_t vmid = AMDGPU_JOB_GET_VMID(job);
	uint64_t csa_mc_addr = vpe_get_csa_mc_addr(ring, vmid);

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0) |
				VPE_CMD_INDIRECT_HEADER_VMID(vmid & 0xf));

	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}
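
/*
 * Fence layout: each fence packet is 4 dwords (header, 64-bit address,
 * 32-bit value). A 64-bit fence is emitted as two packets, the second
 * writing the upper word at addr + 4; AMDGPU_FENCE_FLAG_INT appends a
 * 2-dword trap packet to raise an interrupt, matching the 10-dword
 * budget per fence in .emit_frame_size below.
 */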

static void vpe_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr,
				uint64_t seq, unsigned int flags)
{
	int i = 0;

	do {
		/* write the fence */
		amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
		/* zero in first two bits */
		WARN_ON_ONCE(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, i == 0 ? lower_32_bits(seq) : upper_32_bits(seq));
		addr += 4;
	} while ((flags & AMDGPU_FENCE_FLAG_64BIT) && (i++ < 1));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* generate an interrupt */
		amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_TRAP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	vpe_ring_emit_pred_exec(ring, 0, 6);

	/* wait for idle */
	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
				VPE_POLL_REGMEM_SUBOP_REGMEM) |
				VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
				VPE_CMD_POLL_REGMEM_HEADER_MEM(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
				VPE_CMD_POLL_REGMEM_DW5_INTERVAL(4));
}

static void vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	vpe_ring_emit_pred_exec(ring, 0, 3);

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_REG_WRITE, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

static void vpe_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				   uint32_t val, uint32_t mask)
{
	vpe_ring_emit_pred_exec(ring, 0, 6);

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
				VPE_POLL_REGMEM_SUBOP_REGMEM) |
				VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
				VPE_CMD_POLL_REGMEM_HEADER_MEM(0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val); /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
				VPE_CMD_POLL_REGMEM_DW5_INTERVAL(10));
}

static void vpe_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid,
				   uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}
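
/*
 * Conditional execution: the COND_EXE packet compares the dword at @addr
 * against the reference value 1 and, on mismatch, skips the number of
 * dwords given in its final field. That field is written as a placeholder
 * here; the returned ring offset tells the caller where to patch in the
 * real count later.
 */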

static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring,
					    uint64_t addr)
{
	unsigned int ret;

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, 1);
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0);

	return ret;
}

static int vpe_ring_preempt_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint32_t preempt_reg = vpe->regs.queue0_preempt;
	int i, r = 0;

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* emit the trailing fence */
	ring->trail_seq += 1;
	amdgpu_ring_alloc(ring, 10);
	vpe_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, 0);
	amdgpu_ring_commit(ring);

	/* assert IB preemption */
	WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		dev_err(adev->dev, "ring %d failed to be preempted\n", ring->idx);
	}

	/* deassert IB preemption */
	WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0);

	/* deassert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);

	return r;
}

static int vpe_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int vpe_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;

	if (!adev->pm.dpm_enabled)
		dev_err(adev->dev, "Without PM, cannot support powergating\n");

	dev_dbg(adev->dev, "%s: %s!\n", __func__, (state == AMD_PG_STATE_GATE) ? "GATE" : "UNGATE");

	if (state == AMD_PG_STATE_GATE) {
		amdgpu_dpm_enable_vpe(adev, false);
		vpe->context_started = false;
	} else {
		amdgpu_dpm_enable_vpe(adev, true);
	}

	return 0;
}

static uint64_t vpe_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint64_t rptr;

	if (ring->use_doorbell) {
		rptr = atomic64_read((atomic64_t *)ring->rptr_cpu_addr);
		dev_dbg(adev->dev, "rptr/doorbell before shift == 0x%016llx\n", rptr);
	} else {
		rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi));
		rptr = rptr << 32;
		rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo));
		dev_dbg(adev->dev, "rptr before shift [%i] == 0x%016llx\n", ring->me, rptr);
	}

	return rptr >> 2;
}

static uint64_t vpe_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint64_t wptr;

	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
		dev_dbg(adev->dev, "wptr/doorbell before shift == 0x%016llx\n", wptr);
	} else {
		wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi));
		wptr = wptr << 32;
		wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo));
		dev_dbg(adev->dev, "wptr before shift [%i] == 0x%016llx\n", ring->me, wptr);
	}

	return wptr >> 2;
}

static void vpe_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;

	if (ring->use_doorbell) {
		dev_dbg(adev->dev,
			"Using doorbell, wptr_offs == 0x%08x, lower_32_bits(ring->wptr) << 2 == 0x%08x, upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
			ring->wptr_offs,
			lower_32_bits(ring->wptr << 2),
			upper_32_bits(ring->wptr << 2));
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
		if (vpe->collaborate_mode)
			WDOORBELL64(ring->doorbell_index + 4, ring->wptr << 2);
	} else {
		int i;

		for (i = 0; i < vpe->num_instances; i++) {
			dev_dbg(adev->dev,
				"Not using doorbell, regVPEC_QUEUE0_RB_WPTR == 0x%08x, regVPEC_QUEUE0_RB_WPTR_HI == 0x%08x\n",
				lower_32_bits(ring->wptr << 2),
				upper_32_bits(ring->wptr << 2));
			WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo),
			       lower_32_bits(ring->wptr << 2));
			WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi),
			       upper_32_bits(ring->wptr << 2));
		}
	}
}

static int vpe_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	const uint32_t test_pattern = 0xdeadbeef;
	uint32_t index, i;
	uint64_t wb_addr;
	int ret;

	ret = amdgpu_device_wb_get(adev, &index);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret);
		return ret;
	}

	adev->wb.wb[index] = 0;
	wb_addr = adev->wb.gpu_addr + (index * 4);

	ret = amdgpu_ring_alloc(ring, 4);
	if (ret) {
		dev_err(adev->dev, "amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, ret);
		goto out;
	}

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
	amdgpu_ring_write(ring, lower_32_bits(wb_addr));
	amdgpu_ring_write(ring, upper_32_bits(wb_addr));
	amdgpu_ring_write(ring, test_pattern);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (le32_to_cpu(adev->wb.wb[index]) == test_pattern)
			goto out;
		udelay(1);
	}

	ret = -ETIMEDOUT;
out:
	amdgpu_device_wb_free(adev, index);

	return ret;
}

static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	const uint32_t test_pattern = 0xdeadbeef;
	struct amdgpu_ib ib = {};
	struct dma_fence *f = NULL;
	uint32_t index;
	uint64_t wb_addr;
	int ret;

	ret = amdgpu_device_wb_get(adev, &index);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret);
		return ret;
	}

	adev->wb.wb[index] = 0;
	wb_addr = adev->wb.gpu_addr + (index * 4);

	ret = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
	if (ret)
		goto err0;

	ib.ptr[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0);
	ib.ptr[1] = lower_32_bits(wb_addr);
	ib.ptr[2] = upper_32_bits(wb_addr);
	ib.ptr[3] = test_pattern;
	ib.ptr[4] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.ptr[5] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.ptr[6] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.ptr[7] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.length_dw = 8;

	ret = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (ret)
		goto err1;

	ret = dma_fence_wait_timeout(f, false, timeout);
	if (ret <= 0) {
		ret = ret ? : -ETIMEDOUT;
		goto err1;
	}

	ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);

	return ret;
}
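
/*
 * Idle handling: begin_use cancels any pending idle work and ungates VPE
 * (notifying the FW on the first job of a new context); end_use re-arms
 * the one-second idle timer, whose handler gates VPE again once no fences
 * remain on the ring.
 */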

static void vpe_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;

	cancel_delayed_work_sync(&adev->vpe.idle_work);

	/* Power on VPE and notify VPE of a new context */
	if (!vpe->context_started) {
		uint32_t context_notify;

		/* Power on VPE */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_UNGATE);

		/* Indicate that a job from a new context has been submitted. */
		context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator));
		if ((context_notify & 0x1) == 0)
			context_notify |= 0x1;
		else
			context_notify &= ~(0x1);
		WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify);
		vpe->context_started = true;
	}
}

static void vpe_ring_end_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}

static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (!adev)
		return -ENODEV;

	return amdgpu_show_reset_mask(buf, adev->vpe.supported_reset);
}

static DEVICE_ATTR(vpe_reset_mask, 0444,
		   amdgpu_get_vpe_reset_mask, NULL);

int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->vpe.num_instances) {
		r = device_create_file(adev->dev, &dev_attr_vpe_reset_mask);
		if (r)
			return r;
	}

	return r;
}

void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev)
{
	if (adev->vpe.num_instances)
		device_remove_file(adev->dev, &dev_attr_vpe_reset_mask);
}

static const struct amdgpu_ring_funcs vpe_ring_funcs = {
	.type = AMDGPU_RING_TYPE_VPE,
	.align_mask = 0xf,
	.nop = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0),
	.support_64bit_ptrs = true,
	.get_rptr = vpe_ring_get_rptr,
	.get_wptr = vpe_ring_get_wptr,
	.set_wptr = vpe_ring_set_wptr,
	.emit_frame_size =
		5 + /* vpe_ring_init_cond_exec */
		6 + /* vpe_ring_emit_pipeline_sync */
		10 + 10 + 10 + /* vpe_ring_emit_fence */
		/* vpe_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6,
	.emit_ib_size = 7 + 6, /* vpe_ring_emit_ib */
	.emit_ib = vpe_ring_emit_ib,
	.emit_pipeline_sync = vpe_ring_emit_pipeline_sync,
	.emit_fence = vpe_ring_emit_fence,
	.emit_vm_flush = vpe_ring_emit_vm_flush,
	.emit_wreg = vpe_ring_emit_wreg,
	.emit_reg_wait = vpe_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.insert_nop = vpe_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.test_ring = vpe_ring_test_ring,
	.test_ib = vpe_ring_test_ib,
	.init_cond_exec = vpe_ring_init_cond_exec,
	.preempt_ib = vpe_ring_preempt_ib,
	.begin_use = vpe_ring_begin_use,
	.end_use = vpe_ring_end_use,
};

static void vpe_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vpe.ring.funcs = &vpe_ring_funcs;
}

const struct amd_ip_funcs vpe_ip_funcs = {
	.name = "vpe_v6_1",
	.early_init = vpe_early_init,
	.sw_init = vpe_sw_init,
	.sw_fini = vpe_sw_fini,
	.hw_init = vpe_hw_init,
	.hw_fini = vpe_hw_fini,
	.suspend = vpe_suspend,
	.resume = vpe_resume,
	.set_clockgating_state = vpe_set_clockgating_state,
	.set_powergating_state = vpe_set_powergating_state,
};

const struct amdgpu_ip_block_version vpe_v6_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VPE,
	.major = 6,
	.minor = 1,
	.rev = 0,
	.funcs = &vpe_ip_funcs,
};