/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_enc_rings = 2;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	return 0;
}
/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
				      &adev->vcn.irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.ring_dec;
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
		if (r)
			return r;
	}

	return 0;
}
/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}
/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	int i, r;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		ring->ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			goto done;
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully.\n");

	return r;
}
/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;

	if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
		vcn_v1_0_stop(adev);

	ring->ready = false;

	return 0;
}
/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}
/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}
/**
 * vcn_v1_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
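	/* VCPU cache layout as programmed below: region 0 holds the firmware
	 * image, region 1 the VCN heap, and region 2 the stack plus space
	 * for up to 40 session contexts.
	 */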
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr + size));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr + size));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
			AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
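	/* The UDEC address-config registers mirror the GFX block's
	 * gb_addr_config so the decoder sees the same tiling layout.
	 */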
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
}
/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
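		/* A power-config value of 1 appears to request power-up for a
		 * tile and 2 power-down; with PG enabled only UVDM/UVDU are
		 * forced on here and the rest stay gated until needed.
		 */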
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
	}
	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS, UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		/* wait until all the tiles report powered down */
		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
	}
}
/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;
	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_v1_0_mc_resume(adev);

	vcn_1_0_disable_static_power_gating(adev);
	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);
	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);
	/* initialize VCN memory controller */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		0x00100000L);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);
	/* take all subblocks out of reset, except VCPU */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
			UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	/* boot up the VCPU */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
	mdelay(10);
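	/* Poll UVD_STATUS for the VCPU-up report; if it never sets,
	 * soft-reset the VCPU and retry a few times before giving up.
	 */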
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
		~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
			(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

	/* let the RBC fetch again */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
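	/* Program the two encode rings; ring 0 uses the UVD_RB_* registers,
	 * ring 1 the UVD_RB_*2 variants.
	 */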
	ring = &adev->vcn.ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	return 0;
}
/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the VCN block
 */
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
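	/* 0x11010101 appears to be the same idle configuration that
	 * vcn_v1_0_start programs field-by-field (no-fetch, no-update,
	 * rptr-write-enable).
	 */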
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
		UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put VCPU into reset */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(adev);
	vcn_1_0_enable_static_power_gating(adev);

	return 0;
}
bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
}

int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);

	return ret;
}
static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v1_0_is_idle(handle))
			return -EBUSY;
		vcn_v1_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable Sw gating */
		vcn_v1_0_disable_clock_gating(adev);
	}
	return 0;
}
/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}
/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}
/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}
/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}
/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence sequence number is written to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
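/* The decoder consumes (PACKET0(reg), value) pairs: the sequence number is
 * written to UVD_CONTEXT_ID and the fence address is latched through
 * GPCOM_VCPU_DATA0/1 before the FENCE and TRAP commands are issued.
 */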
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}
/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}
static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}
/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}
/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}
/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence sequence number is written to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}
static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}
/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}
static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}
static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	/* the src_ids match those registered in vcn_v1_0_sw_init:
	 * 124 for decode, 119/120 for the two encode rings
	 */
	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->vcn.ring_dec);
		break;
	case 119:
		amdgpu_fence_process(&adev->vcn.ring_enc[0]);
		break;
	case 120:
		amdgpu_fence_process(&adev->vcn.ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);
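	/* NOPs go out as (PACKET0(UVD_NO_OP), 0) pairs, two dwords each,
	 * which is why both the write pointer and count must be even.
	 */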
	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}
static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		return vcn_v1_0_stop(adev);
	else
		return vcn_v1_0_start(adev);
}
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}
const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};