/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "gca/gfx_8_0_sh_mask.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

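/* Segment sizes inside the VCE BO: one shared firmware image, then a
 * stack/data pair per instance.  vce_v3_0_sw_init() allocates these totals
 * and vce_v3_0_mc_resume() programs the matching VCPU cache offsets.
 */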
#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(void *handle);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

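/* The CGTT override bit disables clock gating entirely while the gating
 * registers below are being reprogrammed.
 */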
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}

static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	   With the clocks in the gated state the core is still
	   accessible but the firmware will throttle the clocks on the
	   fly as necessary.
	*/
	if (!gated) {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	vce_v3_0_override_vce_clock_gating(adev, false);
}

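/**
 * vce_v3_0_firmware_loaded - poll for the VCPU firmware handshake
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_STATUS until the VCPU reports its firmware as loaded; if it
 * never does, kick the ECPU with a soft reset and poll again.
 */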
static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
		vce_v3_0_mc_resume(adev, idx);
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);

		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

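/**
 * vce_v3_0_stop - stop VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Halt the VCPU and put the ECPU of each active instance back into reset
 */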
static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		/* Set Clock-Gating off */
		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
			vce_v3_0_set_vce_sw_clock_gating(adev, false);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

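/* Which VCE instances are fused off is reported by a dedicated fuse macro
 * on APUs and by CC_HARVEST_FUSES on dGPUs; see
 * vce_v3_0_get_harvest_config() below.
 */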
#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000

static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY) ||
	    (adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11))
		return AMDGPU_VCE_HARVEST_VCE1;

	/* Tonga and CZ are dual or single pipe */
	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		return 0;
	}
}

static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_start(adev);
	if (r)
		return r;

	adev->vce.ring[0].ready = false;
	adev->vce.ring[1].ready = false;

	for (i = 0; i < 2; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_wait_for_idle(handle);
	if (r)
		return r;

	return vce_v3_0_stop(adev);
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

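/**
 * vce_v3_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @idx: VCE instance index
 *
 * Tell the VCE memory controller where the firmware, stack and data
 * segments of the given instance live inside the VCE BO
 */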
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

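/* Busy bits within VCE_STATUS; see the bit-layout comment in
 * vce_v3_0_check_soft_reset() for the full register picture.
 */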
#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L /* AUTO_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L /* RB0_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L /* RB1_BUSY */
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				     VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)

static int vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to the VCE team, we should use VCE_STATUS instead of
	 * SRBM_STATUS.VCE_BUSY for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX selects which VCE instance's
	 * registers are accessed (0 for the 1st instance, 0x10 for the 2nd).
	 *
	 * VCE_STATUS
	 * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 * |----+----+-----------+----+----+----+----------+---------+----|
	 * |bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * The VCE team suggests using bits 3..6 for the busy status check.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);

	if (srbm_soft_reset) {
		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
		adev->vce.srbm_soft_reset = srbm_soft_reset;
	} else {
		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
		adev->vce.srbm_soft_reset = 0;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
	return 0;
}

static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int vce_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
		return 0;

	mdelay(5);

	return vce_v3_0_suspend(adev);
}

static int vce_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
		return 0;

	mdelay(5);

	return vce_v3_0_resume(adev);
}

static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);

	switch (entry->src_data) {
	case 0:
	case 1:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

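/* Presumably routes ECLK around the DFS while the engine is gated; only
 * called for Polaris10 from vce_v3_0_set_clockgating_state() below.
 */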
static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

	if (adev->asic_type == CHIP_POLARIS10)
		vce_v3_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);

		if (enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
		return 0;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v3_0_stop()? */
		return 0;
	else
		return vce_v3_0_start(adev);
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.check_soft_reset = vce_v3_0_check_soft_reset,
	.pre_soft_reset = vce_v3_0_pre_soft_reset,
	.soft_reset = vce_v3_0_soft_reset,
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}