2 * Copyright 2016 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
27 #include <linux/firmware.h>
30 #include "amdgpu_vce.h"
32 #include "soc15_common.h"
33 #include "mmsch_v1_0.h"
35 #include "vega10/soc15ip.h"
36 #include "vega10/VCE/vce_4_0_offset.h"
37 #include "vega10/VCE/vce_4_0_default.h"
38 #include "vega10/VCE/vce_4_0_sh_mask.h"
39 #include "vega10/MMHUB/mmhub_1_0_offset.h"
40 #include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
42 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
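/* sizes of the firmware image, VCPU stack and VCPU data regions (the data
 * region scales with AMDGPU_MAX_VCE_HANDLES); these constants size the VCE
 * BO in sw_init and the VCPU cache windows programmed in mc_resume and
 * sriov_start
 */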
44 #define VCE_V4_0_FW_SIZE (384 * 1024)
45 #define VCE_V4_0_STACK_SIZE (64 * 1024)
46 #define VCE_V4_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
48 static void vce_v4_0_mc_resume(struct amdgpu_device *adev);
49 static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev);
50 static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev);
53 * vce_v4_0_ring_get_rptr - get read pointer
55 * @ring: amdgpu_ring pointer
57 * Returns the current hardware read pointer
59 static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
61 struct amdgpu_device *adev = ring->adev;
63 if (ring == &adev->vce.ring[0])
64 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
65 else if (ring == &adev->vce.ring[1])
66 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
68 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
72 * vce_v4_0_ring_get_wptr - get write pointer
74 * @ring: amdgpu_ring pointer
76 * Returns the current hardware write pointer
78 static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
80 struct amdgpu_device *adev = ring->adev;
82 if (ring->use_doorbell)
83 return adev->wb.wb[ring->wptr_offs];
85 if (ring == &adev->vce.ring[0])
86 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
87 else if (ring == &adev->vce.ring[1])
88 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
90 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
94 * vce_v4_0_ring_set_wptr - set write pointer
96 * @ring: amdgpu_ring pointer
98 * Commits the write pointer to the hardware
100 static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
102 struct amdgpu_device *adev = ring->adev;
104 if (ring->use_doorbell) {
105 /* XXX check if swapping is necessary on BE */
106 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
107 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
return;
111 if (ring == &adev->vce.ring[0])
112 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
113 lower_32_bits(ring->wptr));
114 else if (ring == &adev->vce.ring[1])
115 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
116 lower_32_bits(ring->wptr));
118 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3),
119 lower_32_bits(ring->wptr));
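/**
 * vce_v4_0_firmware_loaded - wait for the VCPU firmware to come up
 *
 * @adev: amdgpu_device pointer
 *
 * Polls VCE_STATUS for the VCPU_REPORT_FW_LOADED bit, toggling the ECPU
 * soft reset between retry rounds in case the VCPU is stuck.
 */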
122 static int vce_v4_0_firmware_loaded(struct amdgpu_device *adev)
126 for (i = 0; i < 10; ++i) {
127 for (j = 0; j < 100; ++j) {
uint32_t status =
129 RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS));
131 if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
return 0;
136 DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
137 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
138 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
139 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
141 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
142 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
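/**
 * vce_v4_0_mmsch_start - hand the init table to the MM scheduler (SR-IOV)
 *
 * @adev: amdgpu_device pointer
 * @table: descriptor table built by vce_v4_0_sriov_start
 *
 * Writes the GPU address and size of the descriptor table into the VF
 * context registers, kicks off initialization through the mailbox and
 * polls VCE_MMSCH_VF_MAILBOX_RESP for the completion acknowledge.
 */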
150 static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
151 struct amdgpu_mm_table *table)
153 uint32_t data = 0, loop;
154 uint64_t addr = table->gpu_addr;
155 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
158 size = header->header_size + header->vce_table_size + header->uvd_table_size;
160 /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
161 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO), lower_32_bits(addr));
162 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI), upper_32_bits(addr));
164 /* 2, update vmid of descriptor */
165 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID));
166 data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
167 data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
168 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID), data);
170 /* 3, notify mmsch about the size of this descriptor */
171 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE), size);
173 /* 4, set resp to zero */
174 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
176 /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
177 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);
179 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
181 while ((data & 0x10000002) != 0x10000002) {
183 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
190 dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
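/**
 * vce_v4_0_sriov_start - start VCE when running as an SR-IOV VF
 *
 * @adev: amdgpu_device pointer
 *
 * Builds the MMSCH v1.0 init table from direct register write,
 * read-modify-write and polling commands, mirroring the MMIO programming
 * done by vce_v4_0_start()/vce_v4_0_mc_resume() on bare metal, and then
 * submits it through vce_v4_0_mmsch_start().
 */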
197 static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
199 struct amdgpu_ring *ring;
200 uint32_t offset, size;
201 uint32_t table_size = 0;
202 struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
203 struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
204 struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
205 struct mmsch_v1_0_cmd_end end = { { 0 } };
206 uint32_t *init_table = adev->virt.mm_table.cpu_addr;
207 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
209 direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
210 direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
211 direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
212 end.cmd_header.command_type = MMSCH_COMMAND__END;
214 if (header->vce_table_offset == 0 && header->vce_table_size == 0) {
215 header->version = MMSCH_VERSION;
216 header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
218 if (header->uvd_table_offset == 0 && header->uvd_table_size == 0)
219 header->vce_table_offset = header->header_size;
else
221 header->vce_table_offset = header->uvd_table_size + header->uvd_table_offset;
223 init_table += header->vce_table_offset;
225 ring = &adev->vce.ring[0];
226 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO),
227 lower_32_bits(ring->gpu_addr));
228 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI),
229 upper_32_bits(ring->gpu_addr));
230 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE),
231 ring->ring_size / 4);
233 /* start of MC_RESUME */
234 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x398000);
235 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), ~0x1, 0);
236 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
237 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
238 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
240 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
241 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
242 adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
243 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
244 adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
245 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
246 adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
} else {
248 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
249 adev->vce.gpu_addr >> 8);
250 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
251 adev->vce.gpu_addr >> 8);
252 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
253 adev->vce.gpu_addr >> 8);
256 offset = AMDGPU_VCE_FIRMWARE_OFFSET;
257 size = VCE_V4_0_FW_SIZE;
258 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
259 offset & 0x7FFFFFFF);
260 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
263 size = VCE_V4_0_STACK_SIZE;
264 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1),
265 offset & 0x7FFFFFFF);
266 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
269 size = VCE_V4_0_DATA_SIZE;
270 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2),
271 offset & 0x7FFFFFFF);
272 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
274 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
275 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
276 0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
278 /* end of MC_RESUME */
279 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
280 VCE_STATUS__JOB_BUSY_MASK, ~VCE_STATUS__JOB_BUSY_MASK);
281 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL),
282 ~0x200001, VCE_VCPU_CNTL__CLK_EN_MASK);
283 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
284 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 0);
286 MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
287 VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK,
288 VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK);
290 /* clear BUSY flag */
291 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
292 ~VCE_STATUS__JOB_BUSY_MASK, 0);
295 memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
296 table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
297 header->vce_table_size = table_size;
299 return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
302 return -EINVAL; /* already initialized? */
306 * vce_v4_0_start - start VCE block
308 * @adev: amdgpu_device pointer
310 * Setup and start the VCE block
312 static int vce_v4_0_start(struct amdgpu_device *adev)
314 struct amdgpu_ring *ring;
317 ring = &adev->vce.ring[0];
319 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR), lower_32_bits(ring->wptr));
320 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), lower_32_bits(ring->wptr));
321 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO), ring->gpu_addr);
322 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
323 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE), ring->ring_size / 4);
325 ring = &adev->vce.ring[1];
327 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2), lower_32_bits(ring->wptr));
328 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2), lower_32_bits(ring->wptr));
329 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO2), ring->gpu_addr);
330 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
331 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE2), ring->ring_size / 4);
333 ring = &adev->vce.ring[2];
335 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3), lower_32_bits(ring->wptr));
336 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3), lower_32_bits(ring->wptr));
337 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO3), ring->gpu_addr);
338 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI3), upper_32_bits(ring->gpu_addr));
339 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE3), ring->ring_size / 4);
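/* with the ring registers programmed, set up the memory controller,
 * flag the engine busy, enable the VCPU clock, release the ECPU soft
 * reset and wait for the firmware to report itself loaded
 */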
341 vce_v4_0_mc_resume(adev);
342 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), VCE_STATUS__JOB_BUSY_MASK,
343 ~VCE_STATUS__JOB_BUSY_MASK);
345 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 1, ~0x200001);
347 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
348 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
351 r = vce_v4_0_firmware_loaded(adev);
353 /* clear BUSY flag */
354 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);
if (r) {
357 DRM_ERROR("VCE not responding, giving up!!!\n");
return r;
}
364 static int vce_v4_0_stop(struct amdgpu_device *adev)
367 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);
370 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
371 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
372 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
374 /* clear BUSY flag */
375 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);
377 /* Set Clock-Gating off */
378 /* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
379 vce_v4_0_set_vce_sw_clock_gating(adev, false);
*/
385 static int vce_v4_0_early_init(void *handle)
387 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
389 if (amdgpu_sriov_vf(adev)) /* currently only one VCE ring is supported under SR-IOV */
390 adev->vce.num_rings = 1;
else
392 adev->vce.num_rings = 3;
394 vce_v4_0_set_ring_funcs(adev);
395 vce_v4_0_set_irq_funcs(adev);
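/* sw_init registers the VCE trap interrupt, allocates the VCE BO holding
 * firmware, stack and data regions (the firmware region is omitted when
 * the PSP loads the ucode), initializes the rings (doorbell based under
 * SR-IOV) and allocates the MMSCH memory table used by sriov_start
 */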
400 static int vce_v4_0_sw_init(void *handle)
402 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
403 struct amdgpu_ring *ring;
407 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
411 size = (VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE) * 2;
412 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
413 size += VCE_V4_0_FW_SIZE;
415 r = amdgpu_vce_sw_init(adev, size);
419 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
420 const struct common_firmware_header *hdr;
421 hdr = (const struct common_firmware_header *)adev->vce.fw->data;
422 adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].ucode_id = AMDGPU_UCODE_ID_VCE;
423 adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].fw = adev->vce.fw;
424 adev->firmware.fw_size +=
425 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
426 DRM_INFO("PSP loading VCE firmware\n");
429 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
430 r = amdgpu_vce_resume(adev);
435 for (i = 0; i < adev->vce.num_rings; i++) {
436 ring = &adev->vce.ring[i];
437 sprintf(ring->name, "vce%d", i);
438 if (amdgpu_sriov_vf(adev)) {
439 /* DOORBELL only works under SRIOV */
440 ring->use_doorbell = true;
if (i == 0)
442 ring->doorbell_index = AMDGPU_DOORBELL64_RING0_1 * 2;
else if (i == 1)
444 ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2;
else
446 ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2 + 1;
448 r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
453 r = amdgpu_virt_alloc_mm_table(adev);
460 static int vce_v4_0_sw_fini(void *handle)
463 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
466 amdgpu_virt_free_mm_table(adev);
468 r = amdgpu_vce_suspend(adev);
472 return amdgpu_vce_sw_fini(adev);
475 static int vce_v4_0_hw_init(void *handle)
478 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
480 if (amdgpu_sriov_vf(adev))
481 r = vce_v4_0_sriov_start(adev);
else
483 r = vce_v4_0_start(adev);
487 for (i = 0; i < adev->vce.num_rings; i++)
488 adev->vce.ring[i].ready = false;
490 for (i = 0; i < adev->vce.num_rings; i++) {
491 r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
495 adev->vce.ring[i].ready = true;
498 DRM_INFO("VCE initialized successfully.\n");
503 static int vce_v4_0_hw_fini(void *handle)
505 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
508 /* vce_v4_0_wait_for_idle(handle); */
510 for (i = 0; i < adev->vce.num_rings; i++)
511 adev->vce.ring[i].ready = false;
516 static int vce_v4_0_suspend(void *handle)
519 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
521 r = vce_v4_0_hw_fini(adev);
525 return amdgpu_vce_suspend(adev);
528 static int vce_v4_0_resume(void *handle)
531 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
533 r = amdgpu_vce_resume(adev);
537 return vce_v4_0_hw_init(adev);
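/**
 * vce_v4_0_mc_resume - program memory controller related registers
 *
 * @adev: amdgpu_device pointer
 *
 * Sets the clock gating and LMI defaults, points the 40-bit VCPU cache
 * BARs at the VCE firmware (PSP-loaded image or the VCE BO) and programs
 * the three VCPU cache windows for firmware, stack and data before
 * enabling the system interrupt.
 */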
540 static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
542 uint32_t offset, size;
544 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), 0, ~(1 << 16));
545 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), 0x1FF000, ~0xFF9FF000);
546 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), 0x3F, ~0x3F);
547 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), 0x1FF);
549 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x00398000);
550 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), 0x0, ~0x1);
551 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
552 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
553 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
555 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
556 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
557 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8));
558 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
559 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
} else {
561 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
562 (adev->vce.gpu_addr >> 8));
563 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
564 (adev->vce.gpu_addr >> 40) & 0xff);
567 offset = AMDGPU_VCE_FIRMWARE_OFFSET;
568 size = VCE_V4_0_FW_SIZE;
569 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000);
570 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
572 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), (adev->vce.gpu_addr >> 8));
573 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR1), (adev->vce.gpu_addr >> 40) & 0xff);
574 offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
575 size = VCE_V4_0_STACK_SIZE;
576 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1), (offset & ~0x0f000000) | (1 << 24));
577 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
579 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2), (adev->vce.gpu_addr >> 8));
580 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR2), (adev->vce.gpu_addr >> 40) & 0xff);
582 size = VCE_V4_0_DATA_SIZE;
583 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2), (offset & ~0x0f000000) | (2 << 24));
584 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
586 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), 0x0, ~0x100);
587 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
588 VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
589 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
592 static int vce_v4_0_set_clockgating_state(void *handle,
593 enum amd_clockgating_state state)
595 /* needed for driver unload */
#if 0
600 static bool vce_v4_0_is_idle(void *handle)
602 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
605 mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
606 mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
608 return !(RREG32(mmSRBM_STATUS2) & mask);
611 static int vce_v4_0_wait_for_idle(void *handle)
614 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
616 for (i = 0; i < adev->usec_timeout; i++)
617 if (vce_v4_0_is_idle(handle))
return 0;
623 #define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */
624 #define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */
625 #define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */
626 #define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
627 VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
629 static bool vce_v4_0_check_soft_reset(void *handle)
631 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
632 u32 srbm_soft_reset = 0;
634 /* According to the VCE team, we should use VCE_STATUS instead of
635 * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
636 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
637 * instance's registers are accessed
638 * (0 for the 1st instance, 0x10 for the 2nd instance).
*
* VCE_STATUS
641 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
642 *|----+----+-----------+----+----+----+----------+---------+----|
643 *|bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
*
645 * The VCE team suggests using bits 3--6 for the busy status check.
*/
647 mutex_lock(&adev->grbm_idx_mutex);
648 WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
649 if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
650 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
651 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
653 WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
654 if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
655 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
656 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
658 WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
659 mutex_unlock(&adev->grbm_idx_mutex);
661 if (srbm_soft_reset) {
662 adev->vce.srbm_soft_reset = srbm_soft_reset;
return true;
} else {
665 adev->vce.srbm_soft_reset = 0;
return false;
}
670 static int vce_v4_0_soft_reset(void *handle)
672 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
675 if (!adev->vce.srbm_soft_reset)
return 0;
677 srbm_soft_reset = adev->vce.srbm_soft_reset;
679 if (srbm_soft_reset) {
682 tmp = RREG32(mmSRBM_SOFT_RESET);
683 tmp |= srbm_soft_reset;
684 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
685 WREG32(mmSRBM_SOFT_RESET, tmp);
686 tmp = RREG32(mmSRBM_SOFT_RESET);
690 tmp &= ~srbm_soft_reset;
691 WREG32(mmSRBM_SOFT_RESET, tmp);
692 tmp = RREG32(mmSRBM_SOFT_RESET);
694 /* Wait a little for things to settle down */
701 static int vce_v4_0_pre_soft_reset(void *handle)
703 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
705 if (!adev->vce.srbm_soft_reset)
return 0;
710 return vce_v4_0_suspend(adev);
714 static int vce_v4_0_post_soft_reset(void *handle)
716 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
718 if (!adev->vce.srbm_soft_reset)
return 0;
723 return vce_v4_0_resume(adev);
726 static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
730 tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
732 data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
734 data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
737 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
740 static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
745 /* Set Override to disable Clock Gating */
746 vce_v4_0_override_vce_clock_gating(adev, true);
748 /* This function enables MGCG, which is controlled by firmware.
749 With the clocks in the gated state the core is still
750 accessible, but the firmware will throttle the clocks on the
fly as necessary.
*/
754 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
757 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
759 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
762 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
764 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
767 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
769 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
771 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
773 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
774 data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
775 VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
776 VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
778 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
780 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
783 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
785 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
787 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
789 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
791 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
793 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
795 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
797 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
798 data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
799 VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
800 VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
802 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
804 vce_v4_0_override_vce_clock_gating(adev, false);
807 static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
809 u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
812 tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
814 tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
816 WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
819 static int vce_v4_0_set_clockgating_state(void *handle,
820 enum amd_clockgating_state state)
822 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
823 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
826 if ((adev->asic_type == CHIP_POLARIS10) ||
827 (adev->asic_type == CHIP_TONGA) ||
828 (adev->asic_type == CHIP_FIJI))
829 vce_v4_0_set_bypass_mode(adev, enable);
831 if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
return 0;
834 mutex_lock(&adev->grbm_idx_mutex);
835 for (i = 0; i < 2; i++) {
836 /* Program VCE Instance 0 or 1 if not harvested */
837 if (adev->vce.harvest_config & (1 << i))
continue;
840 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
843 /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
844 uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A));
845 data &= ~(0xf | 0xff0);
846 data |= ((0x0 << 0) | (0x04 << 4));
847 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), data);
849 /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
850 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
851 data &= ~(0xf | 0xff0);
852 data |= ((0x0 << 0) | (0x04 << 4));
853 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
856 vce_v4_0_set_vce_sw_clock_gating(adev, enable);
859 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
860 mutex_unlock(&adev->grbm_idx_mutex);
865 static int vce_v4_0_set_powergating_state(void *handle,
866 enum amd_powergating_state state)
868 /* This doesn't actually powergate the VCE block.
869 * That's done in the dpm code via the SMC. This
870 * just re-inits the block as necessary. The actual
871 * gating still happens in the dpm code. We should
872 * revisit this when there is a cleaner line between
873 * the smc and the hw blocks
*/
875 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
877 if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
return 0;
880 if (state == AMD_PG_STATE_GATE)
881 /* XXX do we need a vce_v4_0_stop()? */
return 0;
else
884 return vce_v4_0_start(adev);
}
#endif
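/**
 * vce_v4_0_ring_emit_ib - emit an indirect buffer
 *
 * Emits a VCE_CMD_IB_VM packet carrying the VM id, the IB GPU address
 * and its length in dwords.
 */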
888 static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
889 struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
891 amdgpu_ring_write(ring, VCE_CMD_IB_VM);
892 amdgpu_ring_write(ring, vm_id);
893 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
894 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
895 amdgpu_ring_write(ring, ib->length_dw);
898 static void vce_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
899 u64 seq, unsigned flags)
901 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
903 amdgpu_ring_write(ring, VCE_CMD_FENCE);
904 amdgpu_ring_write(ring, addr);
905 amdgpu_ring_write(ring, upper_32_bits(addr));
906 amdgpu_ring_write(ring, seq);
907 amdgpu_ring_write(ring, VCE_CMD_TRAP);
910 static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
912 amdgpu_ring_write(ring, VCE_CMD_END);
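/* the VM flush below is built purely from VCE_CMD_REG_WRITE/REG_WAIT
 * packets: write the page directory base into the hub's per-VMID context
 * registers, wait for the value to stick, then trigger the hub's
 * invalidation engine and wait for the ack bit of this vm_id
 */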
915 static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
916 unsigned int vm_id, uint64_t pd_addr)
918 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
919 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
920 unsigned eng = ring->vm_inv_eng;
922 pd_addr = pd_addr | 0x1; /* valid bit */
923 /* now only use physical base address of PDE and valid */
924 BUG_ON(pd_addr & 0xFFFF00000000003EULL);
926 amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
927 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
928 amdgpu_ring_write(ring, upper_32_bits(pd_addr));
930 amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
931 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
932 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
934 amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
935 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
936 amdgpu_ring_write(ring, 0xffffffff);
937 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
940 amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
941 amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
942 amdgpu_ring_write(ring, req);
945 amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
946 amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
947 amdgpu_ring_write(ring, 1 << vm_id);
948 amdgpu_ring_write(ring, 1 << vm_id);
951 static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
952 struct amdgpu_irq_src *source,
954 enum amdgpu_interrupt_state state)
958 if (state == AMDGPU_IRQ_STATE_ENABLE)
959 val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
961 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
962 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
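/* the trap handler acknowledges the interrupt in VCE_SYS_INT_STATUS and
 * signals fence completion on the ring selected by the first source-data
 * dword of the IV entry
 */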
966 static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
967 struct amdgpu_irq_src *source,
968 struct amdgpu_iv_entry *entry)
970 DRM_DEBUG("IH: VCE\n");
972 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_STATUS),
973 VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
974 ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
976 switch (entry->src_data[0]) {
case 0:
case 1:
case 2:
980 amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
break;
default:
983 DRM_ERROR("Unhandled interrupt: %d %d\n",
984 entry->src_id, entry->src_data[0]);
break;
}
991 const struct amd_ip_funcs vce_v4_0_ip_funcs = {
993 .early_init = vce_v4_0_early_init,
995 .sw_init = vce_v4_0_sw_init,
996 .sw_fini = vce_v4_0_sw_fini,
997 .hw_init = vce_v4_0_hw_init,
998 .hw_fini = vce_v4_0_hw_fini,
999 .suspend = vce_v4_0_suspend,
1000 .resume = vce_v4_0_resume,
1001 .is_idle = NULL /* vce_v4_0_is_idle */,
1002 .wait_for_idle = NULL /* vce_v4_0_wait_for_idle */,
1003 .check_soft_reset = NULL /* vce_v4_0_check_soft_reset */,
1004 .pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */,
1005 .soft_reset = NULL /* vce_v4_0_soft_reset */,
1006 .post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
1007 .set_clockgating_state = vce_v4_0_set_clockgating_state,
1008 .set_powergating_state = NULL /* vce_v4_0_set_powergating_state */,
1011 static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
1012 .type = AMDGPU_RING_TYPE_VCE,
1014 .nop = VCE_CMD_NO_OP,
1015 .support_64bit_ptrs = false,
1016 .vmhub = AMDGPU_MMHUB,
1017 .get_rptr = vce_v4_0_ring_get_rptr,
1018 .get_wptr = vce_v4_0_ring_get_wptr,
1019 .set_wptr = vce_v4_0_ring_set_wptr,
1020 .parse_cs = amdgpu_vce_ring_parse_cs_vm,
.emit_frame_size =
1022 17 + /* vce_v4_0_emit_vm_flush */
1023 5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
1024 1, /* vce_v4_0_ring_insert_end */
1025 .emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
1026 .emit_ib = vce_v4_0_ring_emit_ib,
1027 .emit_vm_flush = vce_v4_0_emit_vm_flush,
1028 .emit_fence = vce_v4_0_ring_emit_fence,
1029 .test_ring = amdgpu_vce_ring_test_ring,
1030 .test_ib = amdgpu_vce_ring_test_ib,
1031 .insert_nop = amdgpu_ring_insert_nop,
1032 .insert_end = vce_v4_0_ring_insert_end,
1033 .pad_ib = amdgpu_ring_generic_pad_ib,
1034 .begin_use = amdgpu_vce_ring_begin_use,
1035 .end_use = amdgpu_vce_ring_end_use,
1038 static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
1042 for (i = 0; i < adev->vce.num_rings; i++)
1043 adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
1044 DRM_INFO("VCE enabled in VM mode\n");
1047 static const struct amdgpu_irq_src_funcs vce_v4_0_irq_funcs = {
1048 .set = vce_v4_0_set_interrupt_state,
1049 .process = vce_v4_0_process_interrupt,
1052 static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev)
1054 adev->vce.irq.num_types = 1;
1055 adev->vce.irq.funcs = &vce_v4_0_irq_funcs;
1058 const struct amdgpu_ip_block_version vce_v4_0_ip_block =
1060 .type = AMD_IP_BLOCK_TYPE_VCE,
.major = 4,
.minor = 0,
.rev = 0,
1064 .funcs = &vce_v4_0_ip_funcs,