/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <[email protected]>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

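/**
 * uvd_v6_0_early_init - early hardware init
 *
 * @handle: amdgpu_device pointer
 *
 * Hook up the UVD ring and interrupt handler function tables.
 */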
static int uvd_v6_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v6_0_set_ring_funcs(adev);
        uvd_v6_0_set_irq_funcs(adev);

        return 0;
}

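/**
 * uvd_v6_0_sw_init - software init
 *
 * @handle: amdgpu_device pointer
 *
 * Register the UVD TRAP interrupt, set up the UVD firmware and
 * memory, and initialize the UVD ring.
 */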
static int uvd_v6_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

        return r;
}

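/**
 * uvd_v6_0_sw_fini - software teardown
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend the UVD block and tear down the software state again.
 */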
static int uvd_v6_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        r = amdgpu_uvd_sw_fini(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t tmp;
        int r;

        r = uvd_v6_0_start(adev);
        if (r)
                goto done;

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }

        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;

        uvd_v6_0_stop(adev);
        ring->ready = false;

        return 0;
}

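/**
 * uvd_v6_0_suspend - suspend UVD
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the hardware block and, on non-APU parts, save the UVD state.
 */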
static int uvd_v6_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v6_0_hw_fini(adev);
        if (r)
                return r;

        /* Skip this for APU for now */
        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_uvd_suspend(adev);
                if (r)
                        return r;
        }

        return r;
}

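/**
 * uvd_v6_0_resume - resume UVD
 *
 * @handle: amdgpu_device pointer
 *
 * Restore the UVD state (non-APU parts only) and re-initialize the hardware.
 */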
static int uvd_v6_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* Skip this for APU for now */
        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_uvd_resume(adev);
                if (r)
                        return r;
        }
        r = uvd_v6_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        upper_32_bits(adev->uvd.gpu_addr));

        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE +
               (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

        WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
                bool enable)
{
        u32 data, data1;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        if (enable) {
                data |= UVD_CGC_GATE__SYS_MASK |
                                UVD_CGC_GATE__UDEC_MASK |
                                UVD_CGC_GATE__MPEG2_MASK |
                                UVD_CGC_GATE__RBC_MASK |
                                UVD_CGC_GATE__LMI_MC_MASK |
                                UVD_CGC_GATE__IDCT_MASK |
                                UVD_CGC_GATE__MPRD_MASK |
                                UVD_CGC_GATE__MPC_MASK |
                                UVD_CGC_GATE__LBSI_MASK |
                                UVD_CGC_GATE__LRBBM_MASK |
                                UVD_CGC_GATE__UDEC_RE_MASK |
                                UVD_CGC_GATE__UDEC_CM_MASK |
                                UVD_CGC_GATE__UDEC_IT_MASK |
                                UVD_CGC_GATE__UDEC_DB_MASK |
                                UVD_CGC_GATE__UDEC_MP_MASK |
                                UVD_CGC_GATE__WCB_MASK |
                                UVD_CGC_GATE__VCPU_MASK |
                                UVD_CGC_GATE__SCPU_MASK;
                data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK |
                                UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                                UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                                UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                                UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                                UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
        } else {
                data &= ~(UVD_CGC_GATE__SYS_MASK |
                                UVD_CGC_GATE__UDEC_MASK |
                                UVD_CGC_GATE__MPEG2_MASK |
                                UVD_CGC_GATE__RBC_MASK |
                                UVD_CGC_GATE__LMI_MC_MASK |
                                UVD_CGC_GATE__LMI_UMC_MASK |
                                UVD_CGC_GATE__IDCT_MASK |
                                UVD_CGC_GATE__MPRD_MASK |
                                UVD_CGC_GATE__MPC_MASK |
                                UVD_CGC_GATE__LBSI_MASK |
                                UVD_CGC_GATE__LRBBM_MASK |
                                UVD_CGC_GATE__UDEC_RE_MASK |
                                UVD_CGC_GATE__UDEC_CM_MASK |
                                UVD_CGC_GATE__UDEC_IT_MASK |
                                UVD_CGC_GATE__UDEC_DB_MASK |
                                UVD_CGC_GATE__UDEC_MP_MASK |
                                UVD_CGC_GATE__WCB_MASK |
                                UVD_CGC_GATE__VCPU_MASK |
                                UVD_CGC_GATE__SCPU_MASK);
                data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK |
                                UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                                UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                                UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                                UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                                UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
        }
        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
        uvd_v6_0_enable_mgcg(adev, true);
        uvd_v6_0_mc_resume(adev);

        /* disable interrupt */
        WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

        /* stall UMC and register bus before resetting VCPU */
        WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET,
                UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
                UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL,
                (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
                UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
                UVD_LMI_CTRL__REQ_MODE_MASK |
                UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

        /* enable UMC */
        WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);

        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
                mdelay(10);
                WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }
        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN,
                (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
                ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

        /* clear the bit 4 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

        /* force RBC into idle state */
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

        WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

        return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: fence sequence number
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
        amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
        amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
        amdgpu_ring_write(ring, vm_id);

        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

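/**
 * uvd_v6_0_ring_emit_vm_flush - emit a VM flush
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VM id to flush
 * @pd_addr: page directory address
 *
 * Update the page table base address for the given VM, request a VM
 * invalidation and wait for it to complete.
 */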
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vm_id, uint64_t pd_addr)
{
        uint32_t reg;

        if (vm_id < 8)
                reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
        else
                reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, pd_addr >> 12);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 1 << vm_id);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
        amdgpu_ring_write(ring, 1 << vm_id); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xC);
}

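/**
 * uvd_v6_0_ring_emit_pipeline_sync - emit a pipeline sync
 *
 * @ring: amdgpu_ring pointer
 *
 * Make the ring wait until the last emitted fence sequence number has
 * been signalled before executing further commands.
 */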
static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xE);
}

static bool uvd_v6_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (uvd_v6_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
            REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
            (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

        if (srbm_soft_reset) {
                adev->uvd.srbm_soft_reset = srbm_soft_reset;
                return true;
        } else {
                adev->uvd.srbm_soft_reset = 0;
                return false;
        }
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!adev->uvd.srbm_soft_reset)
                return 0;

        uvd_v6_0_stop(adev);
        return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset;

        if (!adev->uvd.srbm_soft_reset)
                return 0;
        srbm_soft_reset = adev->uvd.srbm_soft_reset;

        if (srbm_soft_reset) {
                u32 tmp;

                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }

        return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!adev->uvd.srbm_soft_reset)
                return 0;

        mdelay(5);

        return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        // TODO
        return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.ring);
        return 0;
}

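/**
 * uvd_v6_0_enable_clock_gating - toggle coarse UVD clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the clock gates
 *
 * Program UVD_CGC_GATE and UVD_SUVD_CGC_GATE to gate or ungate the
 * clocks of the UVD sub-blocks.
 */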
static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
        uint32_t data1, data3;

        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        data3 = RREG32(mmUVD_CGC_GATE);

        data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                     UVD_SUVD_CGC_GATE__SIT_MASK |
                     UVD_SUVD_CGC_GATE__SMP_MASK |
                     UVD_SUVD_CGC_GATE__SCM_MASK |
                     UVD_SUVD_CGC_GATE__SDB_MASK |
                     UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                     UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                     UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                     UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                     UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                     UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                     UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                     UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

        if (enable) {
                data3 |= (UVD_CGC_GATE__SYS_MASK       |
                        UVD_CGC_GATE__UDEC_MASK      |
                        UVD_CGC_GATE__MPEG2_MASK     |
                        UVD_CGC_GATE__RBC_MASK       |
                        UVD_CGC_GATE__LMI_MC_MASK    |
                        UVD_CGC_GATE__LMI_UMC_MASK   |
                        UVD_CGC_GATE__IDCT_MASK      |
                        UVD_CGC_GATE__MPRD_MASK      |
                        UVD_CGC_GATE__MPC_MASK       |
                        UVD_CGC_GATE__LBSI_MASK      |
                        UVD_CGC_GATE__LRBBM_MASK     |
                        UVD_CGC_GATE__UDEC_RE_MASK   |
                        UVD_CGC_GATE__UDEC_CM_MASK   |
                        UVD_CGC_GATE__UDEC_IT_MASK   |
                        UVD_CGC_GATE__UDEC_DB_MASK   |
                        UVD_CGC_GATE__UDEC_MP_MASK   |
                        UVD_CGC_GATE__WCB_MASK       |
                        UVD_CGC_GATE__JPEG_MASK      |
                        UVD_CGC_GATE__SCPU_MASK      |
                        UVD_CGC_GATE__JPEG2_MASK);
                /* only when PG is enabled can we gate the clock to the VCPU */
                if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
                        data3 |= UVD_CGC_GATE__VCPU_MASK;

                data3 &= ~UVD_CGC_GATE__REGS_MASK;
        } else {
                data3 = 0;
        }

        WREG32(mmUVD_SUVD_CGC_GATE, data1);
        WREG32(mmUVD_CGC_GATE, data3);
}

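/**
 * uvd_v6_0_set_sw_clock_gating - configure dynamic (software) clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Program UVD_CGC_CTRL and UVD_SUVD_CGC_CTRL for dynamic clock mode
 * with the gate delay timer and clock-off delay used by this driver.
 */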
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data2;

        data = RREG32(mmUVD_CGC_CTRL);
        data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
                (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                        UVD_CGC_CTRL__SYS_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MODE_MASK |
                        UVD_CGC_CTRL__MPEG2_MODE_MASK |
                        UVD_CGC_CTRL__REGS_MODE_MASK |
                        UVD_CGC_CTRL__RBC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                        UVD_CGC_CTRL__IDCT_MODE_MASK |
                        UVD_CGC_CTRL__MPRD_MODE_MASK |
                        UVD_CGC_CTRL__MPC_MODE_MASK |
                        UVD_CGC_CTRL__LBSI_MODE_MASK |
                        UVD_CGC_CTRL__LRBBM_MODE_MASK |
                        UVD_CGC_CTRL__WCB_MODE_MASK |
                        UVD_CGC_CTRL__VCPU_MODE_MASK |
                        UVD_CGC_CTRL__JPEG_MODE_MASK |
                        UVD_CGC_CTRL__SCPU_MODE_MASK |
                        UVD_CGC_CTRL__JPEG2_MODE_MASK);
        data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

        WREG32(mmUVD_CGC_CTRL, data);
        WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, cgc_flags, suvd_flags;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);

        cgc_flags = UVD_CGC_GATE__SYS_MASK |
                UVD_CGC_GATE__UDEC_MASK |
                UVD_CGC_GATE__MPEG2_MASK |
                UVD_CGC_GATE__RBC_MASK |
                UVD_CGC_GATE__LMI_MC_MASK |
                UVD_CGC_GATE__IDCT_MASK |
                UVD_CGC_GATE__MPRD_MASK |
                UVD_CGC_GATE__MPC_MASK |
                UVD_CGC_GATE__LBSI_MASK |
                UVD_CGC_GATE__LRBBM_MASK |
                UVD_CGC_GATE__UDEC_RE_MASK |
                UVD_CGC_GATE__UDEC_CM_MASK |
                UVD_CGC_GATE__UDEC_IT_MASK |
                UVD_CGC_GATE__UDEC_DB_MASK |
                UVD_CGC_GATE__UDEC_MP_MASK |
                UVD_CGC_GATE__WCB_MASK |
                UVD_CGC_GATE__VCPU_MASK |
                UVD_CGC_GATE__SCPU_MASK |
                UVD_CGC_GATE__JPEG_MASK |
                UVD_CGC_GATE__JPEG2_MASK;

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= cgc_flags;
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

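/**
 * uvd_v6_0_enable_mgcg - enable/disable UVD medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Toggle the memory clock gating controls and dynamic clock mode
 * depending on whether MGCG is supported for UVD on this asic.
 */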
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data |= 0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        } else {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data &= ~0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        }
}

static int uvd_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

        if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
                return 0;

        if (enable) {
                /* wait for STATUS to clear */
                if (uvd_v6_0_wait_for_idle(handle))
                        return -EBUSY;
                uvd_v6_0_enable_clock_gating(adev, true);
                /* enable HW gates because UVD is idle */
/*              uvd_v6_0_set_hw_clock_gating(adev); */
        } else {
                /* disable HW gating and enable SW gating */
                uvd_v6_0_enable_clock_gating(adev, false);
        }
        uvd_v6_0_set_sw_clock_gating(adev);
        return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
                return 0;

        WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

        if (state == AMD_PG_STATE_GATE) {
                uvd_v6_0_stop(adev);
                return 0;
        } else {
                return uvd_v6_0_start(adev);
        }
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
        .name = "uvd_v6_0",
        .early_init = uvd_v6_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v6_0_sw_init,
        .sw_fini = uvd_v6_0_sw_fini,
        .hw_init = uvd_v6_0_hw_init,
        .hw_fini = uvd_v6_0_hw_fini,
        .suspend = uvd_v6_0_suspend,
        .resume = uvd_v6_0_resume,
        .is_idle = uvd_v6_0_is_idle,
        .wait_for_idle = uvd_v6_0_wait_for_idle,
        .check_soft_reset = uvd_v6_0_check_soft_reset,
        .pre_soft_reset = uvd_v6_0_pre_soft_reset,
        .soft_reset = uvd_v6_0_soft_reset,
        .post_soft_reset = uvd_v6_0_post_soft_reset,
        .set_clockgating_state = uvd_v6_0_set_clockgating_state,
        .set_powergating_state = uvd_v6_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .nop = PACKET0(mmUVD_NO_OP, 0),
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_frame_size =
                2 + /* uvd_v6_0_ring_emit_hdp_flush */
                2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
                10 + /* uvd_v6_0_ring_emit_pipeline_sync */
                14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
        .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .nop = PACKET0(mmUVD_NO_OP, 0),
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .emit_frame_size =
                2 + /* uvd_v6_0_ring_emit_hdp_flush */
                2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
                10 + /* uvd_v6_0_ring_emit_pipeline_sync */
                20 + /* uvd_v6_0_ring_emit_vm_flush */
                14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
        .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
        .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

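/**
 * uvd_v6_0_set_ring_funcs - pick the ring function table
 *
 * @adev: amdgpu_device pointer
 *
 * Use the VM-aware ring functions on Polaris and newer asics, and the
 * physical-mode ring functions on older ones.
 */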
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
        if (adev->asic_type >= CHIP_POLARIS10) {
                adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
                DRM_INFO("UVD is enabled in VM mode\n");
        } else {
                adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
                DRM_INFO("UVD is enabled in physical mode\n");
        }
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
        .set = uvd_v6_0_set_interrupt_state,
        .process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.irq.num_types = 1;
        adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 6,
                .minor = 0,
                .rev = 0,
                .funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 6,
                .minor = 2,
                .rev = 0,
                .funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 6,
                .minor = 3,
                .rev = 0,
                .funcs = &uvd_v6_0_ip_funcs,
};