/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"

#include "ivsrcid/vce/irqsrcs_vce_4_0.h"

#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

#define VCE_V4_0_FW_SIZE	(384 * 1024)
#define VCE_V4_0_STACK_SIZE	(64 * 1024)
#define VCE_V4_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v4_0_mc_resume(struct amdgpu_device *adev);
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v4_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->me == 0)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
	else if (ring->me == 1)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
	else
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
}

/**
 * vce_v4_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring->me == 0)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
	else if (ring->me == 1)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
	else
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
}

/**
 * vce_v4_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring->me == 0)
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
			lower_32_bits(ring->wptr));
	else if (ring->me == 1)
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
			lower_32_bits(ring->wptr));
	else
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3),
			lower_32_bits(ring->wptr));
}

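/**
 * vce_v4_0_firmware_loaded - wait for the VCE firmware to report ready
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_STATUS for the FW_LOADED bit, toggling the ECPU soft reset
 * between retry rounds. Returns 0 on success, -ETIMEDOUT otherwise.
 */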
static int vce_v4_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status =
				RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS));

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
				VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

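/**
 * vce_v4_0_mmsch_start - hand the init table to the MMSCH (SR-IOV)
 *
 * @adev: amdgpu_device pointer
 * @table: memory descriptor table shared with the MMSCH
 *
 * Program the descriptor address, VMID and size into the MMSCH VF
 * registers, kick off the initialization via the mailbox and wait
 * for the MMSCH to acknowledge it.
 */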
static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO), lower_32_bits(addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI), upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID));
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID), data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE), size);

	/* 4, set resp to zero */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);

	WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
	adev->wb.wb[adev->vce.ring[0].wptr_offs] = 0;
	adev->vce.ring[0].wptr = 0;
	adev->vce.ring[0].wptr_old = 0;

	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);

	data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}

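/**
 * vce_v4_0_sriov_start - start VCE through the MMSCH (SR-IOV)
 *
 * @adev: amdgpu_device pointer
 *
 * Build the MMSCH init table (ring setup plus the MC_RESUME register
 * programming) in the shared mm_table and submit it with
 * vce_v4_0_mmsch_start().
 */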
static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->vce_table_offset == 0 && header->vce_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->uvd_table_offset == 0 && header->uvd_table_size == 0)
			header->vce_table_offset = header->header_size;
		else
			header->vce_table_offset = header->uvd_table_size + header->uvd_table_offset;

		init_table += header->vce_table_offset;

		ring = &adev->vce.ring[0];
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO),
					    lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI),
					    upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE),
					    ring->ring_size / 4);

		/* BEGIN OF MC_RESUME */
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x398000);
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), ~0x1, 0);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
						    mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
						    adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
						    mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
						    (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
						    mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
						    adev->vce.gpu_addr >> 8);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
						    mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
						    (adev->vce.gpu_addr >> 40) & 0xff);
		}
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
					    mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
					    adev->vce.gpu_addr >> 8);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
					    mmVCE_LMI_VCPU_CACHE_64BIT_BAR1),
					    (adev->vce.gpu_addr >> 40) & 0xff);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
					    mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
					    adev->vce.gpu_addr >> 8);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
					    mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
					    (adev->vce.gpu_addr >> 40) & 0xff);

		offset = AMDGPU_VCE_FIRMWARE_OFFSET;
		size = VCE_V4_0_FW_SIZE;
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
					    offset & ~0x0f000000);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);

		offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
		size = VCE_V4_0_STACK_SIZE;
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1),
					    (offset & ~0x0f000000) | (1 << 24));
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);

		offset += size;
		size = VCE_V4_0_DATA_SIZE;
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2),
					    (offset & ~0x0f000000) | (2 << 24));
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
						   VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
						   VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);

		/* end of MC_RESUME */
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
						   VCE_STATUS__JOB_BUSY_MASK, ~VCE_STATUS__JOB_BUSY_MASK);
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL),
						   ~0x200001, VCE_VCPU_CNTL__CLK_EN_MASK);
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
						   ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 0);

		MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
					      VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK,
					      VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK);

		/* clear BUSY flag */
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
						   ~VCE_STATUS__JOB_BUSY_MASK, 0);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->vce_table_size = table_size;
	}

	return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}

/**
 * vce_v4_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v4_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->vce.ring[0];

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO), ring->gpu_addr);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE), ring->ring_size / 4);

	ring = &adev->vce.ring[1];

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO2), ring->gpu_addr);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE2), ring->ring_size / 4);

	ring = &adev->vce.ring[2];

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO3), ring->gpu_addr);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI3), upper_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE3), ring->ring_size / 4);

	vce_v4_0_mc_resume(adev);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), VCE_STATUS__JOB_BUSY_MASK,
			~VCE_STATUS__JOB_BUSY_MASK);

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 1, ~0x200001);

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

	mdelay(100);

	r = vce_v4_0_firmware_loaded(adev);

	/* clear BUSY flag */
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}

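/**
 * vce_v4_0_stop - stop the VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Halt the VCPU, hold the ECPU in soft reset and clear the busy flag.
 */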
static int vce_v4_0_stop(struct amdgpu_device *adev)
{
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);

	/* hold on ECPU */
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

	/* clear BUSY flag */
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);

	/* Set Clock-Gating off */
	/* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
	 *	vce_v4_0_set_vce_sw_clock_gating(adev, false);
	 */

	return 0;
}

static int vce_v4_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) /* currently only VCE0 supports SR-IOV */
		adev->vce.num_rings = 1;
	else
		adev->vce.num_rings = 3;

	vce_v4_0_set_ring_funcs(adev);
	vce_v4_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v4_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;

	unsigned size;
	int r, i;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
	if (r)
		return r;

	size = VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		size += VCE_V4_0_FW_SIZE;

	r = amdgpu_vce_sw_init(adev, size);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);

		adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vce.saved_bo)
			return -ENOMEM;

		hdr = (const struct common_firmware_header *)adev->vce.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].ucode_id = AMDGPU_UCODE_ID_VCE;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].fw = adev->vce.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCE firmware\n");
	} else {
		r = amdgpu_vce_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		if (amdgpu_sriov_vf(adev)) {
			/* DOORBELL only works under SRIOV */
			ring->use_doorbell = true;

			/* currently only the first encoding ring is used for SR-IOV,
			 * so set unused locations for the other rings.
			 */
			if (i == 0)
				ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
			else
				ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
		}
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
		if (r)
			return r;
	}

	r = amdgpu_vce_entity_init(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}

static int vce_v4_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* free MM table */
	amdgpu_virt_free_mm_table(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		kvfree(adev->vce.saved_bo);
		adev->vce.saved_bo = NULL;
	}

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}

static int vce_v4_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		r = vce_v4_0_sriov_start(adev);
	else
		r = vce_v4_0_start(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v4_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	if (!amdgpu_sriov_vf(adev)) {
		/* vce_v4_0_wait_for_idle(handle); */
		vce_v4_0_stop(adev);
	} else {
		/* full access mode, so don't touch any VCE register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	return 0;
}

static int vce_v4_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
		void *ptr = adev->vce.cpu_addr;

		memcpy_fromio(adev->vce.saved_bo, ptr, size);
	}

	r = vce_v4_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

static int vce_v4_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
		void *ptr = adev->vce.cpu_addr;

		memcpy_toio(ptr, adev->vce.saved_bo, size);
	} else {
		r = amdgpu_vce_resume(adev);
		if (r)
			return r;
	}

	return vce_v4_0_hw_init(adev);
}

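/**
 * vce_v4_0_mc_resume - program the VCE memory controller interface
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the clock gating and LMI defaults and point the three VCPU
 * cache windows (firmware, stack, data) at the VCE BO, or at the PSP
 * TMR region when the firmware is loaded by the PSP.
 */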
static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t offset, size;
	uint64_t tmr_mc_addr;

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), 0, ~(1 << 16));
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), 0x1FF000, ~0xFF9FF000);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), 0x3F, ~0x3F);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), 0x1FF);

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x00398000);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), 0x0, ~0x1);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		tmr_mc_addr = (uint64_t)(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi) << 32 |
						adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
			(tmr_mc_addr >> 8));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
			(tmr_mc_addr >> 40) & 0xff);
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
	} else {
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
			(adev->vce.gpu_addr >> 8));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
			(adev->vce.gpu_addr >> 40) & 0xff);
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000);
	}

	size = VCE_V4_0_FW_SIZE;
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), (adev->vce.gpu_addr >> 8));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR1), (adev->vce.gpu_addr >> 40) & 0xff);
	offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
	size = VCE_V4_0_STACK_SIZE;
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1), (offset & ~0x0f000000) | (1 << 24));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2), (adev->vce.gpu_addr >> 8));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR2), (adev->vce.gpu_addr >> 40) & 0xff);
	offset += size;
	size = VCE_V4_0_DATA_SIZE;
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2), (offset & ~0x0f000000) | (2 << 24));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), 0x0, ~0x100);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
			VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
			~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}

static int vce_v4_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

#if 0
static bool vce_v4_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v4_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v4_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

#define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L   /* AUTO_BUSY */
#define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L   /* RB0_BUSY */
#define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L   /* RB1_BUSY */
#define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)

static bool vce_v4_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to the VCE team, we should use VCE_STATUS instead of
	 * SRBM_STATUS.VCE_BUSY for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for the 1st instance, 0x10 for the 2nd instance).
	 *
	 *VCE_STATUS
	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 *|----+----+-----------+----+----+----+----------+---------+----|
	 *|bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * The VCE team suggests using bits 3 to 6 for the busy status check.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}

static int vce_v4_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int vce_v4_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v4_0_suspend(adev);
}

static int vce_v4_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v4_0_resume(adev);
}

static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	u32 tmp, data;

	tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
	if (override)
		data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
	else
		data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;

	if (tmp != data)
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
}

static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v4_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	 * With the clocks in the gated state the core is still
	 * accessible but the firmware will throttle the clocks on the
	 * fly as necessary.
	 */
	if (gated) {
		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
	} else {
		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
	}
	vce_v4_0_override_vce_clock_gating(adev, false);
}

static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int vce_v4_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

	if ((adev->asic_type == CHIP_POLARIS10) ||
		(adev->asic_type == CHIP_TONGA) ||
		(adev->asic_type == CHIP_FIJI))
		vce_v4_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);

		if (enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A));
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
		}

		vce_v4_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v4_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
		return 0;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v4_0_stop()? */
		return 0;
	else
		return vce_v4_0_start(adev);
}
#endif

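/**
 * vce_v4_0_ring_emit_ib - execute an indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vmid: VMID the IB is submitted under
 * @ctx_switch: unused by VCE
 *
 * Write the ring commands to execute the indirect buffer.
 */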
static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

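/**
 * vce_v4_0_ring_emit_fence - emit a fence command to the ring
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the sequence number is written to
 * @seq: sequence number to write
 * @flags: fence flags (64-bit sequence numbers are not supported)
 *
 * Write a fence followed by a trap command to the ring.
 */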
static void vce_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
}

static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCE_CMD_END);
}

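/**
 * vce_v4_0_emit_reg_wait - emit a wait-for-register command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (dword index)
 * @val: expected value
 * @mask: bits to compare
 *
 * Make the VCE engine wait until (register & mask) matches @val.
 */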
static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				   uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
				   unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vce_v4_0_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
			       lower_32_bits(pd_addr), 0xffffffff);
}

static void vce_v4_0_emit_wreg(struct amdgpu_ring *ring,
			       uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

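/*
 * On bare metal the VCE trap interrupt is enabled or disabled directly
 * through VCE_SYS_INT_EN; under SR-IOV the guest does not touch the
 * register and the callback simply returns 0.
 */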
static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (!amdgpu_sriov_vf(adev)) {
		if (state == AMDGPU_IRQ_STATE_ENABLE)
			val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
				~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	}
	return 0;
}

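/*
 * Fence completion interrupts report the originating ring in
 * entry->src_data[0] (0..2); anything else is logged as unhandled.
 */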
static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	switch (entry->src_data[0]) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

const struct amd_ip_funcs vce_v4_0_ip_funcs = {
	.name = "vce_v4_0",
	.early_init = vce_v4_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v4_0_sw_init,
	.sw_fini = vce_v4_0_sw_fini,
	.hw_init = vce_v4_0_hw_init,
	.hw_fini = vce_v4_0_hw_fini,
	.suspend = vce_v4_0_suspend,
	.resume = vce_v4_0_resume,
	.is_idle = NULL /* vce_v4_0_is_idle */,
	.wait_for_idle = NULL /* vce_v4_0_wait_for_idle */,
	.check_soft_reset = NULL /* vce_v4_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */,
	.soft_reset = NULL /* vce_v4_0_soft_reset */,
	.post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
	.set_clockgating_state = vce_v4_0_set_clockgating_state,
	.set_powergating_state = NULL /* vce_v4_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0x3f,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vce_v4_0_ring_get_rptr,
	.get_wptr = vce_v4_0_ring_get_wptr,
	.set_wptr = vce_v4_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vce_v4_0_emit_vm_flush */
		5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
		1, /* vce_v4_0_ring_insert_end */
	.emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
	.emit_ib = vce_v4_0_ring_emit_ib,
	.emit_vm_flush = vce_v4_0_emit_vm_flush,
	.emit_fence = vce_v4_0_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vce_v4_0_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
	.emit_wreg = vce_v4_0_emit_wreg,
	.emit_reg_wait = vce_v4_0_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vce.num_rings; i++) {
		adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
		adev->vce.ring[i].me = i;
	}
	DRM_INFO("VCE enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vce_v4_0_irq_funcs = {
	.set = vce_v4_0_set_interrupt_state,
	.process = vce_v4_0_process_interrupt,
};

static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v4_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v4_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v4_0_ip_funcs,
};