/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_enc_rings = 2;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_jpeg_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	return 0;
}
/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
					&adev->vcn.irq);
		if (r)
			return r;
	}

	/* VCN JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
	if (r)
		return r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.ring_dec;
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
		if (r)
			return r;
	}

	ring = &adev->vcn.ring_jpeg;
	sprintf(ring->name, "vcn_jpeg");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
	if (r)
		return r;

	return 0;
}
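/*
 * All three rings above are created with the same parameters; the 512
 * passed to amdgpu_ring_init() is the per-submission command budget in DWs
 * (max_dw), not a byte size, and irq index 0 routes each ring's fences
 * through the first VCN interrupt type. (This reads off the usual
 * amdgpu_ring_init() signature of this era; treat it as an assumption.)
 */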
/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}
/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	int i, r;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		ring->ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			goto done;
		}
	}

	ring = &adev->vcn.ring_jpeg;
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully.\n");

	return r;
}
/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;

	if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
		vcn_v1_0_stop(adev);

	ring->ready = false;

	return 0;
}
/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}
/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}
/**
 * vcn_v1_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr + size));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr + size));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
			AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));

	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
}
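/*
 * The three VCPU cache windows programmed above slice the single VCN BO
 * into consecutive regions: window 0 covers the firmware image ("size"
 * bytes, page aligned), window 1 the AMDGPU_VCN_HEAP_SIZE heap right after
 * it, and window 2 the stack plus 40 per-session context areas. This is a
 * summary inferred from the offsets above, not from a register spec.
 */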
/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
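/*
 * Pattern used by this helper and its enable counterpart below: clearing a
 * bit in a *_CGC_GATE register appears to force that sub-block's clock on,
 * while the *_MODE bits in *_CGC_CTRL pick static vs. dynamic gating per
 * sub-block; DYN_CLOCK_MODE is only selected when AMD_CG_SUPPORT_VCN_MGCG
 * is set. (Interpretation of the code, not of a hardware spec.)
 */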
/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS, UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
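/*
 * The PGFSM handshake above writes a requested power state per tile into
 * UVD_PGFSM_CONFIG and then polls UVD_PGFSM_STATUS until the tiles report
 * it. Judging only from the two call sites in this file, an all-zero
 * STATUS means fully powered up and 2 per field means powered down; the
 * mixed 1/2 CONFIG encoding used when PG is supported is kept as-is.
 */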
static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
	}
}
/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_v1_0_mc_resume(adev);

	vcn_1_0_disable_static_power_gating(adev);
	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);
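	/*
	 * WREG32_P() is a masked read-modify-write; per the usual amdgpu
	 * definition (an assumption here, the macro lives in amdgpu.h) it
	 * expands roughly to:
	 *
	 *	tmp = RREG32(reg);
	 *	tmp &= mask;           bits in mask are preserved
	 *	tmp |= (val & ~mask);  bits outside mask take the new value
	 *	WREG32(reg, tmp);
	 *
	 * so val = 0 with mask = ~VCPU_EN_MASK clears only the VCPU_EN bit.
	 */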
	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
		UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* initialize VCN memory controller */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		0x00100000L);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
			UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	/* boot up the VCPU */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
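	/*
	 * REG_SET_FIELD() only edits the local value; nothing reaches the
	 * hardware until the WREG32_SOC15() above. Assuming the usual soc15
	 * helper, each call is roughly:
	 *
	 *	tmp = (tmp & ~REG__FIELD_MASK) |
	 *	      ((v << REG__FIELD__SHIFT) & REG__FIELD_MASK);
	 *
	 * The word assembled here leaves RB_NO_FETCH set, parking the RBC
	 * until the ring registers are programmed below.
	 */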
	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
			(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	ring = &adev->vcn.ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	ring = &adev->vcn.ring_jpeg;
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);

	/* initialize wptr */
	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);

	/* copy patch commands to the jpeg ring */
	vcn_v1_0_jpeg_ring_set_patch_ring(ring,
		(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));

	return 0;
}
/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
		UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put VCPU into reset */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(adev);
	vcn_1_0_enable_static_power_gating(adev);
	return 0;
}
static bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
}

static int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);

	return ret;
}
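/*
 * 0x2 is the UVD_STATUS value the VCPU firmware posts once it is up; it is
 * the same value vcn_v1_0_start() polls for after releasing reset. (An
 * inference from usage in this file, not a documented status code.)
 */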
static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	if (enable) {
		/* wait for STATUS to clear */
		if (vcn_v1_0_is_idle(handle))
			return -EBUSY;
		vcn_v1_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable Sw gating */
		vcn_v1_0_disable_clock_gating(adev);
	}
	return 0;
}
/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}
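/*
 * Decode-ring commands are plain type-0 register writes: each PACKET0()
 * header names one UVD register and the following DW is the value, so the
 * start marker above is just GPCOM_VCPU_DATA0 = 0 followed by
 * GPCOM_VCPU_CMD = VCN_DEC_CMD_PACKET_START << 1.
 */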
/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}
/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}
/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}
static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}
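/*
 * Note the asymmetry with the encode ring: the decode ring has no native
 * register-write packet, so emit_wreg/emit_reg_wait above tunnel through
 * the GPCOM_VCPU_DATA0/DATA1/CMD mailbox registers, while the enc ring
 * uses the dedicated VCN_ENC_CMD_REG_WRITE/REG_WAIT commands further down.
 */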
/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}

/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write a fence and a trap command to the enc ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}
static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}
/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}
static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}
static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
/**
 * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
}

/**
 * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
}

/**
 * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_jpeg_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x80010000);
}
/**
 * vcn_v1_0_jpeg_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x00010000);
}
/**
 * vcn_v1_0_jpeg_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0xffffffff);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x3fbc);

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x1);
}
/**
 * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer.
 */
static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, ib->length_dw);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
	amdgpu_ring_write(ring, 0x2);
}
static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
					     uint32_t reg, uint32_t val,
					     uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE3));
	}
	amdgpu_ring_write(ring, mask);
}
static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
}
static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
					 uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
}
static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}
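/*
 * JPEG nops are emitted as header/payload pairs (PACKETJ_TYPE6 plus a zero
 * DW), which is why the WARN_ON above insists on an even count and an even
 * write pointer.
 */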
static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;
	ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[(*ptr)++] = 0;
		ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
	} else {
		ring->ring[(*ptr)++] = reg_offset;
		ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
	}
	ring->ring[(*ptr)++] = val;
}
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
{
	struct amdgpu_device *adev = ring->adev;

	uint32_t reg, reg_offset, val, mask, i;

	// 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
	reg_offset = (reg << 2);
	val = lower_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
	reg_offset = (reg << 2);
	val = upper_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 3rd to 5th: issue MEM_READ commands
	for (i = 0; i <= 2; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
		ring->ring[ptr++] = 0;
	}

	// 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x13;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 7th: program mmUVD_JRBC_RB_REF_DATA
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
	reg_offset = (reg << 2);
	val = 0x1;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x1;
	mask = 0x1;

	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = 0x01400200;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = val;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[ptr++] = 0;
		ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
	} else {
		ring->ring[ptr++] = reg_offset;
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
	}
	ring->ring[ptr++] = mask;

	//9th to 21st: insert no-op
	for (i = 0; i <= 12; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ring->ring[ptr++] = 0;
	}

	//22nd: reset mmUVD_JRBC_RB_RPTR
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
	reg_offset = (reg << 2);
	val = 0;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	//23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x12;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
}
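/*
 * The sequence written by the two helpers above is parked at the tail of
 * the JPEG ring (vcn_v1_0_start() places it at wptr + max_dw *
 * amdgpu_sched_hw_submission, matching the ring's extra reservation). It
 * re-points the engine's RB read address at the ring base, flips RB_CNTL
 * to no-fetch, resets the RPTR and then re-enables fetching, i.e. it lets
 * the engine be steered back to the start of the ring without CPU help;
 * the exact hardware rationale is not documented in this file.
 */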
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->vcn.ring_dec);
		break;
	case 119:
		amdgpu_fence_process(&adev->vcn.ring_enc[0]);
		break;
	case 120:
		amdgpu_fence_process(&adev->vcn.ring_enc[1]);
		break;
	case 126:
		amdgpu_fence_process(&adev->vcn.ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
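/*
 * The src_ids decoded above mirror the ids registered in
 * vcn_v1_0_sw_init(): 124 for decode, 119/120 for the two encode rings
 * and 126 for the JPEG ring.
 */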
static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}
static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		return vcn_v1_0_stop(adev);
	else
		return vcn_v1_0_start(adev);
}
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.nop = PACKET0(0x81ff, 0),
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.extra_dw = 64,
	.get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
	.get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
	.set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v1_0_jpeg_ring_nop,
	.insert_start = vcn_v1_0_jpeg_ring_insert_start,
	.insert_end = vcn_v1_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
	DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}
const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};