2 * Copyright 2016 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions of the Software.
27 #include <linux/firmware.h>
28 #include <drm/drm_drv.h>
31 #include "amdgpu_vce.h"
34 #include "soc15_common.h"
35 #include "mmsch_v1_0.h"
37 #include "vce/vce_4_0_offset.h"
38 #include "vce/vce_4_0_default.h"
39 #include "vce/vce_4_0_sh_mask.h"
40 #include "mmhub/mmhub_1_0_offset.h"
41 #include "mmhub/mmhub_1_0_sh_mask.h"
43 #include "ivsrcid/vce/irqsrcs_vce_4_0.h"
45 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
47 #define VCE_V4_0_FW_SIZE (384 * 1024)
48 #define VCE_V4_0_STACK_SIZE (64 * 1024)
49 #define VCE_V4_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
51 static void vce_v4_0_mc_resume(struct amdgpu_device *adev);
52 static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev);
53 static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev);
56 * vce_v4_0_ring_get_rptr - get read pointer
58 * @ring: amdgpu_ring pointer
60 * Returns the current hardware read pointer
62 static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
64 struct amdgpu_device *adev = ring->adev;
67 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
68 else if (ring->me == 1)
69 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
71 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
75 * vce_v4_0_ring_get_wptr - get write pointer
77 * @ring: amdgpu_ring pointer
79 * Returns the current hardware write pointer
81 static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
83 struct amdgpu_device *adev = ring->adev;
85 if (ring->use_doorbell)
86 return *ring->wptr_cpu_addr;
89 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
90 else if (ring->me == 1)
91 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
93 return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
97 * vce_v4_0_ring_set_wptr - set write pointer
99 * @ring: amdgpu_ring pointer
101 * Commits the write pointer to the hardware
103 static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
105 struct amdgpu_device *adev = ring->adev;
107 if (ring->use_doorbell) {
108 /* XXX check if swapping is necessary on BE */
109 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
110 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
115 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
116 lower_32_bits(ring->wptr));
117 else if (ring->me == 1)
118 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
119 lower_32_bits(ring->wptr));
121 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3),
122 lower_32_bits(ring->wptr));
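/**
 * vce_v4_0_firmware_loaded - wait for the VCE firmware to report ready
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_STATUS until the VCPU reports that the firmware is loaded,
 * soft-resetting the ECPU between retry rounds. Returns 0 on success,
 * a negative error code if the firmware never responds.
 */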
125 static int vce_v4_0_firmware_loaded(struct amdgpu_device *adev)
129 for (i = 0; i < 10; ++i) {
130 for (j = 0; j < 100; ++j) {
132 RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS));
134 if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
139 DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
140 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
141 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
142 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
144 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
145 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
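/**
 * vce_v4_0_mmsch_start - hand the init table to the MM scheduler
 *
 * @adev: amdgpu_device pointer
 * @table: memory descriptor table describing the VCE setup
 *
 * Point the MMSCH at the descriptor table, kick off the initialization
 * and wait for the acknowledgement in VCE_MMSCH_VF_MAILBOX_RESP.
 */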
153 static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
154 struct amdgpu_mm_table *table)
156 uint32_t data = 0, loop;
157 uint64_t addr = table->gpu_addr;
158 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
161 size = header->header_size + header->vce_table_size + header->uvd_table_size;
163 /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
164 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO), lower_32_bits(addr));
165 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI), upper_32_bits(addr));
167 /* 2, update vmid of descriptor */
168 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID));
169 data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
170 data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
171 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID), data);
173 /* 3, notify mmsch about the size of this descriptor */
174 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE), size);
176 /* 4, set resp to zero */
177 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
179 WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
180 *adev->vce.ring[0].wptr_cpu_addr = 0;
181 adev->vce.ring[0].wptr = 0;
182 adev->vce.ring[0].wptr_old = 0;
184 /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
185 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);
187 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
189 while ((data & 0x10000002) != 0x10000002) {
191 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
198 dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
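/**
 * vce_v4_0_sriov_start - start VCE through the MM scheduler
 *
 * @adev: amdgpu_device pointer
 *
 * Under SRIOV the host programs the VCE registers on our behalf: build
 * the MMSCH command table describing ring, firmware and cache setup,
 * then submit it with vce_v4_0_mmsch_start().
 */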
205 static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
207 struct amdgpu_ring *ring;
208 uint32_t offset, size;
209 uint32_t table_size = 0;
210 struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
211 struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
212 struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
213 struct mmsch_v1_0_cmd_end end = { { 0 } };
214 uint32_t *init_table = adev->virt.mm_table.cpu_addr;
215 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
217 direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
218 direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
219 direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
220 end.cmd_header.command_type = MMSCH_COMMAND__END;
222 if (header->vce_table_offset == 0 && header->vce_table_size == 0) {
223 header->version = MMSCH_VERSION;
224 header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
226 if (header->uvd_table_offset == 0 && header->uvd_table_size == 0)
227 header->vce_table_offset = header->header_size;
229 header->vce_table_offset = header->uvd_table_size + header->uvd_table_offset;
231 init_table += header->vce_table_offset;
233 ring = &adev->vce.ring[0];
234 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO),
235 lower_32_bits(ring->gpu_addr));
236 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI),
237 upper_32_bits(ring->gpu_addr));
238 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE),
239 ring->ring_size / 4);
241 /* BEGIN OF MC_RESUME */
242 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x398000);
243 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), ~0x1, 0);
244 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
245 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
246 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
248 offset = AMDGPU_VCE_FIRMWARE_OFFSET;
249 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
250 uint32_t low = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
251 uint32_t hi = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi;
252 uint64_t tmr_mc_addr = (uint64_t)(hi) << 32 | low;
254 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
255 mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), tmr_mc_addr >> 8);
256 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
257 mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
258 (tmr_mc_addr >> 40) & 0xff);
259 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
261 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
262 mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
263 adev->vce.gpu_addr >> 8);
264 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
265 mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
266 (adev->vce.gpu_addr >> 40) & 0xff);
267 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
268 offset & ~0x0f000000);
271 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
272 mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
273 adev->vce.gpu_addr >> 8);
274 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
275 mmVCE_LMI_VCPU_CACHE_64BIT_BAR1),
276 (adev->vce.gpu_addr >> 40) & 0xff);
277 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
278 mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
279 adev->vce.gpu_addr >> 8);
280 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
281 mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
282 (adev->vce.gpu_addr >> 40) & 0xff);
284 size = VCE_V4_0_FW_SIZE;
285 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
287 offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
288 size = VCE_V4_0_STACK_SIZE;
289 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1),
290 (offset & ~0x0f000000) | (1 << 24));
291 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
294 size = VCE_V4_0_DATA_SIZE;
295 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2),
296 (offset & ~0x0f000000) | (2 << 24));
297 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
299 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
300 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
301 VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
302 VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
304 /* end of MC_RESUME */
305 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
306 VCE_STATUS__JOB_BUSY_MASK, ~VCE_STATUS__JOB_BUSY_MASK);
307 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL),
308 ~0x200001, VCE_VCPU_CNTL__CLK_EN_MASK);
309 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
310 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 0);
312 MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
313 VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK,
314 VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK);
316 /* clear BUSY flag */
317 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
318 ~VCE_STATUS__JOB_BUSY_MASK, 0);
321 memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
322 table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
323 header->vce_table_size = table_size;
326 return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
330 * vce_v4_0_start - start VCE block
332 * @adev: amdgpu_device pointer
334 * Setup and start the VCE block
336 static int vce_v4_0_start(struct amdgpu_device *adev)
338 struct amdgpu_ring *ring;
341 ring = &adev->vce.ring[0];
343 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR), lower_32_bits(ring->wptr));
344 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), lower_32_bits(ring->wptr));
345 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO), ring->gpu_addr);
346 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
347 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE), ring->ring_size / 4);
349 ring = &adev->vce.ring[1];
351 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2), lower_32_bits(ring->wptr));
352 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2), lower_32_bits(ring->wptr));
353 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO2), ring->gpu_addr);
354 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
355 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE2), ring->ring_size / 4);
357 ring = &adev->vce.ring[2];
359 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3), lower_32_bits(ring->wptr));
360 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3), lower_32_bits(ring->wptr));
361 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO3), ring->gpu_addr);
362 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI3), upper_32_bits(ring->gpu_addr));
363 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE3), ring->ring_size / 4);
365 vce_v4_0_mc_resume(adev);
366 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), VCE_STATUS__JOB_BUSY_MASK,
367 ~VCE_STATUS__JOB_BUSY_MASK);
369 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 1, ~0x200001);
371 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
372 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
375 r = vce_v4_0_firmware_loaded(adev);
377 /* clear BUSY flag */
378 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);
381 DRM_ERROR("VCE not responding, giving up!!!\n");
388 static int vce_v4_0_stop(struct amdgpu_device *adev)
392 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);
395 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
396 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
397 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
399 /* clear VCE_STATUS */
400 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0);
402 /* Set Clock-Gating off */
403 /* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
404 vce_v4_0_set_vce_sw_clock_gating(adev, false);
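/**
 * vce_v4_0_early_init - set up ring and irq callbacks
 *
 * @handle: amdgpu_device pointer
 *
 * Select the number of rings (one under SRIOV, three otherwise) and
 * hook up the ring and interrupt functions.
 */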
410 static int vce_v4_0_early_init(void *handle)
412 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
414 if (amdgpu_sriov_vf(adev)) /* SRIOV currently supports only one VCE ring */
415 adev->vce.num_rings = 1;
417 adev->vce.num_rings = 3;
419 vce_v4_0_set_ring_funcs(adev);
420 vce_v4_0_set_irq_funcs(adev);
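/**
 * vce_v4_0_sw_init - sw init for VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Register the interrupt source, allocate the firmware/stack/data
 * buffer, set up PSP firmware loading when used and initialize the
 * rings (doorbell based under SRIOV).
 */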
425 static int vce_v4_0_sw_init(void *handle)
427 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
428 struct amdgpu_ring *ring;
433 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
437 size = VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE;
438 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
439 size += VCE_V4_0_FW_SIZE;
441 r = amdgpu_vce_sw_init(adev, size);
445 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
446 const struct common_firmware_header *hdr;
447 unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
449 adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
450 if (!adev->vce.saved_bo)
453 hdr = (const struct common_firmware_header *)adev->vce.fw->data;
454 adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].ucode_id = AMDGPU_UCODE_ID_VCE;
455 adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].fw = adev->vce.fw;
456 adev->firmware.fw_size +=
457 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
458 DRM_INFO("PSP loading VCE firmware\n");
460 r = amdgpu_vce_resume(adev);
465 for (i = 0; i < adev->vce.num_rings; i++) {
466 enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
468 ring = &adev->vce.ring[i];
469 sprintf(ring->name, "vce%d", i);
470 if (amdgpu_sriov_vf(adev)) {
471 /* DOORBELL only works under SRIOV */
472 ring->use_doorbell = true;
474 /* currently only use the first encoding ring for sriov,
475 * so set unused doorbell locations for the other rings.
478 ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2;
480 ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
482 r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
489 r = amdgpu_vce_entity_init(adev);
493 r = amdgpu_virt_alloc_mm_table(adev);
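/**
 * vce_v4_0_sw_fini - sw fini for VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Free the MM table and the saved firmware buffer, then tear down the
 * common VCE state.
 */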
500 static int vce_v4_0_sw_fini(void *handle)
503 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
506 amdgpu_virt_free_mm_table(adev);
508 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
509 kvfree(adev->vce.saved_bo);
510 adev->vce.saved_bo = NULL;
513 r = amdgpu_vce_suspend(adev);
517 return amdgpu_vce_sw_fini(adev);
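/**
 * vce_v4_0_hw_init - start and test the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Start the block (through the MMSCH under SRIOV) and run a ring test
 * on each enabled ring.
 */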
520 static int vce_v4_0_hw_init(void *handle)
523 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
525 if (amdgpu_sriov_vf(adev))
526 r = vce_v4_0_sriov_start(adev);
528 r = vce_v4_0_start(adev);
532 for (i = 0; i < adev->vce.num_rings; i++) {
533 r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
538 DRM_INFO("VCE initialized successfully.\n");
543 static int vce_v4_0_hw_fini(void *handle)
545 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
547 cancel_delayed_work_sync(&adev->vce.idle_work);
549 if (!amdgpu_sriov_vf(adev)) {
550 /* vce_v4_0_wait_for_idle(handle); */
553 /* full access mode, so don't touch any VCE register */
554 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
560 static int vce_v4_0_suspend(void *handle)
562 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
565 if (adev->vce.vcpu_bo == NULL)
568 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
569 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
570 unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
571 void *ptr = adev->vce.cpu_addr;
573 memcpy_fromio(adev->vce.saved_bo, ptr, size);
579 * Proper cleanups before halting the HW engine:
580 * - cancel the delayed idle work
581 * - enable powergating
582 * - enable clockgating
585 * TODO: to align with the VCN implementation, move the
586 * jobs for clockgating/powergating/dpm setting to
587 * ->set_powergating_state().
589 cancel_delayed_work_sync(&adev->vce.idle_work);
591 if (adev->pm.dpm_enabled) {
592 amdgpu_dpm_enable_vce(adev, false);
594 amdgpu_asic_set_vce_clocks(adev, 0, 0);
595 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
597 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
601 r = vce_v4_0_hw_fini(adev);
605 return amdgpu_vce_suspend(adev);
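/**
 * vce_v4_0_resume - resume the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Restore the saved VCPU buffer contents (PSP load path) or reload the
 * firmware image, then reinitialize the hardware.
 */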
608 static int vce_v4_0_resume(void *handle)
610 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
613 if (adev->vce.vcpu_bo == NULL)
616 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
618 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
619 unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
620 void *ptr = adev->vce.cpu_addr;
622 memcpy_toio(ptr, adev->vce.saved_bo, size);
626 r = amdgpu_vce_resume(adev);
631 return vce_v4_0_hw_init(adev);
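/**
 * vce_v4_0_mc_resume - program the memory controller interface
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the LMI and cache registers and point the VCPU caches at the
 * firmware, stack and data regions.
 */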
634 static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
636 uint32_t offset, size;
637 uint64_t tmr_mc_addr;
639 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), 0, ~(1 << 16));
640 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), 0x1FF000, ~0xFF9FF000);
641 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), 0x3F, ~0x3F);
642 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), 0x1FF);
644 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x00398000);
645 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), 0x0, ~0x1);
646 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
647 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
648 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
650 offset = AMDGPU_VCE_FIRMWARE_OFFSET;
652 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
653 tmr_mc_addr = (uint64_t)(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi) << 32 |
654 adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
655 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
657 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
658 (tmr_mc_addr >> 40) & 0xff);
659 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
661 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
662 (adev->vce.gpu_addr >> 8));
663 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
664 (adev->vce.gpu_addr >> 40) & 0xff);
665 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000);
668 size = VCE_V4_0_FW_SIZE;
669 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
671 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), (adev->vce.gpu_addr >> 8));
672 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR1), (adev->vce.gpu_addr >> 40) & 0xff);
673 offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
674 size = VCE_V4_0_STACK_SIZE;
675 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1), (offset & ~0x0f000000) | (1 << 24));
676 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
678 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2), (adev->vce.gpu_addr >> 8));
679 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR2), (adev->vce.gpu_addr >> 40) & 0xff);
681 size = VCE_V4_0_DATA_SIZE;
682 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2), (offset & ~0x0f000000) | (2 << 24));
683 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
685 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), 0x0, ~0x100);
686 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
687 VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
688 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
691 static int vce_v4_0_set_clockgating_state(void *handle,
692 enum amd_clockgating_state state)
694 /* needed for driver unload */
699 static bool vce_v4_0_is_idle(void *handle)
701 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
704 mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
705 mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
707 return !(RREG32(mmSRBM_STATUS2) & mask);
710 static int vce_v4_0_wait_for_idle(void *handle)
713 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
715 for (i = 0; i < adev->usec_timeout; i++)
716 if (vce_v4_0_is_idle(handle))
722 #define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */
723 #define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */
724 #define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */
725 #define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
726 VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
728 static bool vce_v4_0_check_soft_reset(void *handle)
730 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
731 u32 srbm_soft_reset = 0;
733 /* According to the VCE team, we should use VCE_STATUS instead of the
734 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
735 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
736 * instance's registers are accessed
737 * (0 for the 1st instance, 0x10 for the 2nd instance).
740 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
741 *|----+----+-----------+----+----+----+----------+---------+----|
742 *|bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
744 * The VCE team suggests using bits 3-6 for the busy status check.
746 mutex_lock(&adev->grbm_idx_mutex);
747 WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
748 if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
749 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
750 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
752 WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
753 if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
754 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
755 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
757 WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
758 mutex_unlock(&adev->grbm_idx_mutex);
760 if (srbm_soft_reset) {
761 adev->vce.srbm_soft_reset = srbm_soft_reset;
764 adev->vce.srbm_soft_reset = 0;
769 static int vce_v4_0_soft_reset(void *handle)
771 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
774 if (!adev->vce.srbm_soft_reset)
776 srbm_soft_reset = adev->vce.srbm_soft_reset;
778 if (srbm_soft_reset) {
781 tmp = RREG32(mmSRBM_SOFT_RESET);
782 tmp |= srbm_soft_reset;
783 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
784 WREG32(mmSRBM_SOFT_RESET, tmp);
785 tmp = RREG32(mmSRBM_SOFT_RESET);
789 tmp &= ~srbm_soft_reset;
790 WREG32(mmSRBM_SOFT_RESET, tmp);
791 tmp = RREG32(mmSRBM_SOFT_RESET);
793 /* Wait a little for things to settle down */
800 static int vce_v4_0_pre_soft_reset(void *handle)
802 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
804 if (!adev->vce.srbm_soft_reset)
809 return vce_v4_0_suspend(adev);
813 static int vce_v4_0_post_soft_reset(void *handle)
815 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
817 if (!adev->vce.srbm_soft_reset)
822 return vce_v4_0_resume(adev);
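/**
 * vce_v4_0_override_vce_clock_gating - toggle the clock gating override
 *
 * @adev: amdgpu_device pointer
 * @override: enable or disable the override
 *
 * Set or clear the VCE_CGTT_OVERRIDE bit in VCE_RB_ARB_CTRL.
 */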
825 static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
829 tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
831 data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
833 data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
836 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
839 static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
844 /* Set Override to disable Clock Gating */
845 vce_v4_0_override_vce_clock_gating(adev, true);
847 /* This function enables MGCG which is controlled by firmware.
848 With the clocks in the gated state the core is still
849 accessible but the firmware will throttle the clocks on the fly as needed. */
853 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
856 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
858 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
861 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
863 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
866 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
868 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
870 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
872 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
873 data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
874 VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
875 VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
877 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
879 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
882 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
884 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
886 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
888 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
890 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
892 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
894 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
896 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
897 data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
898 VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
899 VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
901 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
903 vce_v4_0_override_vce_clock_gating(adev, false);
906 static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
908 u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
911 tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
913 tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
915 WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
918 static int vce_v4_0_set_clockgating_state(void *handle,
919 enum amd_clockgating_state state)
921 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
922 bool enable = (state == AMD_CG_STATE_GATE);
925 if ((adev->asic_type == CHIP_POLARIS10) ||
926 (adev->asic_type == CHIP_TONGA) ||
927 (adev->asic_type == CHIP_FIJI))
928 vce_v4_0_set_bypass_mode(adev, enable);
930 if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
933 mutex_lock(&adev->grbm_idx_mutex);
934 for (i = 0; i < 2; i++) {
935 /* Program VCE Instance 0 or 1 if not harvested */
936 if (adev->vce.harvest_config & (1 << i))
939 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
942 /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
943 uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A));
944 data &= ~(0xf | 0xff0);
945 data |= ((0x0 << 0) | (0x04 << 4));
946 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), data);
948 /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
949 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
950 data &= ~(0xf | 0xff0);
951 data |= ((0x0 << 0) | (0x04 << 4));
952 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
955 vce_v4_0_set_vce_sw_clock_gating(adev, enable);
958 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
959 mutex_unlock(&adev->grbm_idx_mutex);
965 static int vce_v4_0_set_powergating_state(void *handle,
966 enum amd_powergating_state state)
968 /* This doesn't actually powergate the VCE block.
969 * That's done in the dpm code via the SMC. This
970 * just re-inits the block as necessary. The actual
971 * gating still happens in the dpm code. We should
972 * revisit this when there is a cleaner line between
973 * the smc and the hw blocks
975 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
977 if (state == AMD_PG_STATE_GATE)
978 return vce_v4_0_stop(adev);
980 return vce_v4_0_start(adev);
983 static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
984 struct amdgpu_ib *ib, uint32_t flags)
986 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
988 amdgpu_ring_write(ring, VCE_CMD_IB_VM);
989 amdgpu_ring_write(ring, vmid);
990 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
991 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
992 amdgpu_ring_write(ring, ib->length_dw);
995 static void vce_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
996 u64 seq, unsigned flags)
998 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1000 amdgpu_ring_write(ring, VCE_CMD_FENCE);
1001 amdgpu_ring_write(ring, addr);
1002 amdgpu_ring_write(ring, upper_32_bits(addr));
1003 amdgpu_ring_write(ring, seq);
1004 amdgpu_ring_write(ring, VCE_CMD_TRAP);
1007 static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
1009 amdgpu_ring_write(ring, VCE_CMD_END);
1012 static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1013 uint32_t val, uint32_t mask)
1015 amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
1016 amdgpu_ring_write(ring, reg << 2);
1017 amdgpu_ring_write(ring, mask);
1018 amdgpu_ring_write(ring, val);
1021 static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
1022 unsigned int vmid, uint64_t pd_addr)
1024 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1026 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1028 /* wait for reg writes */
1029 vce_v4_0_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
1030 vmid * hub->ctx_addr_distance,
1031 lower_32_bits(pd_addr), 0xffffffff);
1034 static void vce_v4_0_emit_wreg(struct amdgpu_ring *ring,
1035 uint32_t reg, uint32_t val)
1037 amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
1038 amdgpu_ring_write(ring, reg << 2);
1039 amdgpu_ring_write(ring, val);
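/**
 * vce_v4_0_set_interrupt_state - enable/disable the VCE trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @state: requested interrupt state
 *
 * On bare metal, toggle the system interrupt trap enable bit; under
 * SRIOV the interrupt routing is owned by the host.
 */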
1042 static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
1043 struct amdgpu_irq_src *source,
1045 enum amdgpu_interrupt_state state)
1049 if (!amdgpu_sriov_vf(adev)) {
1050 if (state == AMDGPU_IRQ_STATE_ENABLE)
1051 val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
1053 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
1054 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
1059 static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
1060 struct amdgpu_irq_src *source,
1061 struct amdgpu_iv_entry *entry)
1063 DRM_DEBUG("IH: VCE\n");
1065 switch (entry->src_data[0]) {
1069 amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
1072 DRM_ERROR("Unhandled interrupt: %d %d\n",
1073 entry->src_id, entry->src_data[0]);
1080 const struct amd_ip_funcs vce_v4_0_ip_funcs = {
1082 .early_init = vce_v4_0_early_init,
1084 .sw_init = vce_v4_0_sw_init,
1085 .sw_fini = vce_v4_0_sw_fini,
1086 .hw_init = vce_v4_0_hw_init,
1087 .hw_fini = vce_v4_0_hw_fini,
1088 .suspend = vce_v4_0_suspend,
1089 .resume = vce_v4_0_resume,
1090 .is_idle = NULL /* vce_v4_0_is_idle */,
1091 .wait_for_idle = NULL /* vce_v4_0_wait_for_idle */,
1092 .check_soft_reset = NULL /* vce_v4_0_check_soft_reset */,
1093 .pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */,
1094 .soft_reset = NULL /* vce_v4_0_soft_reset */,
1095 .post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
1096 .set_clockgating_state = vce_v4_0_set_clockgating_state,
1097 .set_powergating_state = vce_v4_0_set_powergating_state,
1100 static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
1101 .type = AMDGPU_RING_TYPE_VCE,
1103 .nop = VCE_CMD_NO_OP,
1104 .support_64bit_ptrs = false,
1105 .no_user_fence = true,
1106 .vmhub = AMDGPU_MMHUB_0,
1107 .get_rptr = vce_v4_0_ring_get_rptr,
1108 .get_wptr = vce_v4_0_ring_get_wptr,
1109 .set_wptr = vce_v4_0_ring_set_wptr,
1110 .parse_cs = amdgpu_vce_ring_parse_cs_vm,
1112 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1113 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1114 4 + /* vce_v4_0_emit_vm_flush */
1115 5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
1116 1, /* vce_v4_0_ring_insert_end */
1117 .emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
1118 .emit_ib = vce_v4_0_ring_emit_ib,
1119 .emit_vm_flush = vce_v4_0_emit_vm_flush,
1120 .emit_fence = vce_v4_0_ring_emit_fence,
1121 .test_ring = amdgpu_vce_ring_test_ring,
1122 .test_ib = amdgpu_vce_ring_test_ib,
1123 .insert_nop = amdgpu_ring_insert_nop,
1124 .insert_end = vce_v4_0_ring_insert_end,
1125 .pad_ib = amdgpu_ring_generic_pad_ib,
1126 .begin_use = amdgpu_vce_ring_begin_use,
1127 .end_use = amdgpu_vce_ring_end_use,
1128 .emit_wreg = vce_v4_0_emit_wreg,
1129 .emit_reg_wait = vce_v4_0_emit_reg_wait,
1130 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1133 static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
1137 for (i = 0; i < adev->vce.num_rings; i++) {
1138 adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
1139 adev->vce.ring[i].me = i;
1141 DRM_INFO("VCE enabled in VM mode\n");
1144 static const struct amdgpu_irq_src_funcs vce_v4_0_irq_funcs = {
1145 .set = vce_v4_0_set_interrupt_state,
1146 .process = vce_v4_0_process_interrupt,
1149 static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev)
1151 adev->vce.irq.num_types = 1;
1152 adev->vce.irq.funcs = &vce_v4_0_irq_funcs;
1155 const struct amdgpu_ip_block_version vce_v4_0_ip_block =
1157 .type = AMD_IP_BLOCK_TYPE_VCE,
1161 .funcs = &vce_v4_0_ip_funcs,